| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 54.1k chars) | int64 (0 – 699) | string (111 – 35.6k chars) | int64 (0 – 699) | int64 (0 – 1) |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 20 |
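A quick sanity check for the maximum-product-subarray routine above. The identifier max_product_subarray is the name restored in this cleanup pass; the checks themselves are illustrative, not part of the original row:

assert max_product_subarray([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0     # zero beats any negative product
assert max_product_subarray([-2, -3, 4]) == 24    # two negatives multiply back to a positive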
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 20 | 1 |
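The preprocess method in the image-processor class above applies its transforms in a fixed order: resize to the shortest edge, center-crop, rescale, then normalize. A minimal numpy-only sketch of the last two steps, using the same (0.5, 0.5, 0.5) mean/std that IMAGENET_STANDARD_MEAN and IMAGENET_STANDARD_STD supply (function and variable names here are illustrative):

import numpy as np

def rescale_and_normalize(image, scale=1 / 255, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    # map uint8 pixels into [0, 1], then shift and scale each channel
    image = image.astype(np.float32) * scale
    return (image - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)

pixels = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
print(rescale_and_normalize(pixels).shape)  # (224, 224, 3)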
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 20 |
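The integration tests above pin down the tokenizer's lowercasing and detokenization behavior against the public facebook/blenderbot-90M checkpoint; the same round trip can be reproduced directly (requires network access to fetch the checkpoint):

from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
ids = tok("I am a small frog.").input_ids
print(tok.decode(ids, skip_special_tokens=True))  # "i am a small frog ." per the test above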
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 20 | 1 |
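The patch_submodule context manager above generalizes, for nested submodules and renamed imports, what the standard library already offers for a single attribute; a minimal sketch of the same idea using unittest.mock.patch:

import os
from unittest.mock import patch

def fake_join(*parts):
    return "::".join(parts)

with patch("os.path.join", fake_join):
    assert os.path.join("a", "b") == "a::b"  # patched inside the context
# the original os.path.join is restored once the context exits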
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    # Liouville lambda: 1 if `number` has an even count of prime factors
    # (with multiplicity), -1 if the count is odd.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
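For context, the function above flips sign with the parity of the prime-factor count. A self-contained check with a throwaway trial-division factorizer (the repo-local maths.prime_factors is assumed to behave the same way):

def trial_division_factors(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

# 12 = 2 * 2 * 3 has three prime factors (odd count), so lambda(12) = -1
assert (-1 if len(trial_division_factors(12)) % 2 else 1) == -1
# 10 = 2 * 5 has two (even count), so lambda(10) = 1
assert (-1 if len(trial_division_factors(10)) % 2 else 1) == 1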
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def parse_bool(string: str) -> bool:
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 1 |
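A typical invocation of the conversion script above; the script filename and the checkpoint/config names are placeholders, not taken from the row:

python convert_controlnet_checkpoint.py \
  --checkpoint_path ./control_sd15_canny.pth \
  --original_config_file ./cldm_v15.yaml \
  --dump_path ./controlnet-canny \
  --to_safetensors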
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Any:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self) -> Dict:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ =None
if self.use_input_mask:
a__ =random_attention_mask([self.batch_size, self.seq_length])
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__ =ids_tensor([self.batch_size] , self.num_choices)
a__ =ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
a__ =TFConvBertModel(config=lowercase_)
a__ ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
a__ =[input_ids, input_mask]
a__ =model(lowercase_)
a__ =model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
a__ =TFConvBertForMaskedLM(config=lowercase_)
a__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__ =model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
a__ =self.num_labels
a__ =TFConvBertForSequenceClassification(config=lowercase_)
a__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__ =model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
a__ =self.num_choices
a__ =TFConvBertForMultipleChoice(config=lowercase_)
a__ =tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
a__ =tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
a__ =tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
a__ ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
a__ =model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
a__ =self.num_labels
a__ =TFConvBertForTokenClassification(config=lowercase_)
a__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__ =model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
a__ =TFConvBertForQuestionAnswering(config=lowercase_)
a__ ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
a__ =model(lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def __UpperCamelCase ( self) -> int:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_)
def __UpperCamelCase ( self) -> Tuple:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
@slow
def __UpperCamelCase ( self) -> Tuple:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =True
if hasattr(lowercase_ , 'use_cache'):
a__ =True
a__ =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
a__ =getattr(self.model_tester , 'key_length' , lowercase_)
for model_class in self.all_model_classes:
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model_class(lowercase_)
a__ =len(model(lowercase_))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ , saved_model=lowercase_)
a__ =os.path.join(lowercase_ , 'saved_model' , '1')
a__ =tf.keras.models.load_model(lowercase_)
a__ =model(lowercase_)
if self.is_encoder_decoder:
a__ =outputs['encoder_hidden_states']
a__ =outputs['encoder_attentions']
else:
a__ =outputs['hidden_states']
a__ =outputs['attentions']
self.assertEqual(len(lowercase_) , lowercase_)
a__ =getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowercase_) , lowercase_)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self) -> List[Any]:
a__ =TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(lowercase_)
def __UpperCamelCase ( self) -> Optional[int]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
a__ =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
a__ =getattr(self.model_tester , 'key_length' , lowercase_)
a__ =getattr(self.model_tester , 'key_length' , lowercase_)
def check_decoder_attentions_output(lowercase_):
a__ =len(lowercase_)
self.assertEqual(out_len % 2 , 0)
a__ =outputs.decoder_attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase_):
a__ =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
a__ =True
a__ =False
a__ =model_class(lowercase_)
a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
a__ =len(lowercase_)
self.assertEqual(config.output_hidden_states , lowercase_)
check_encoder_attentions_output(lowercase_)
if self.is_encoder_decoder:
a__ =model_class(lowercase_)
a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
self.assertEqual(config.output_hidden_states , lowercase_)
check_decoder_attentions_output(lowercase_)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a__ =True
a__ =model_class(lowercase_)
a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
self.assertEqual(config.output_hidden_states , lowercase_)
check_encoder_attentions_output(lowercase_)
# Check attention is always last and order is fine
a__ =True
a__ =True
a__ =model_class(lowercase_)
a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_))
self.assertEqual(model.config.output_hidden_states , lowercase_)
check_encoder_attentions_output(lowercase_)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
a__ =tf.constant([[0, 1, 2, 3, 4, 5]])
a__ =model(lowercase_)[0]
a__ =[1, 6, 768]
self.assertEqual(output.shape , lowercase_)
a__ =tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4)
| 20 |
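The integration test at the end of the block doubles as a minimal smoke test; everything below (checkpoint id, input ids, expected shape) is taken verbatim from that test:

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
print(output.shape)  # (1, 6, 768), matching the expected shape in the test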
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 20 | 1 |
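The _add_variant helper above encodes weight variants by splicing the variant name in front of the file extension; its behavior on a concrete (illustrative) filename:

# variant insertion as implemented by _add_variant
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"  # no variant, unchanged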
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 20 |
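The brute-force attack above works because XOR is self-inverse: re-applying the same repeating key recovers the plaintext, so a candidate key can be scored simply by checking that every decoded byte is a printable character. A tiny standalone demonstration:

from itertools import cycle

key = b"abc"
plain = b"the quick brown fox"
cipher = bytes(p ^ k for p, k in zip(plain, cycle(key)))
assert bytes(c ^ k for c, k in zip(cipher, cycle(key))) == plain  # the same key decrypts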
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 1 |
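A typical run of the LeViT conversion script above (the script filename is a placeholder; the model name must be one of the keys in names_to_config):

python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub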
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'

CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
| 20 |
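A sketch of how a test would consume the session fixtures above; the fixture names come from the block itself, while the test body is illustrative:

def test_private_text_repo_is_reachable(hf_private_dataset_repo_txt_data, hf_token):
    # the fixture yields a repo id like "__DUMMY_TRANSFORMERS_USER__/repo_txt_data-<timestamp>"
    assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER)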
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_lowerCAmelCase: Tuple = parser.parse_args()
_lowerCAmelCase: Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 | 1 |
import os
from collections.abc import Iterator
def _lowercase( __a : str = "." ):
for dir_path, dir_names, filenames in os.walk(__a ):
a__ =[d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__a )[1] in (".py", ".ipynb"):
yield os.path.join(__a , __a ).lstrip('./' )
def _lowercase( __a : Dict ):
return f"""{i * ' '}*""" if i else "\n##"
def _lowercase( __a : str , __a : str ):
a__ =old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__a ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(__a )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def _lowercase( __a : str = "." ):
a__ =''
for filepath in sorted(good_file_paths(__a ) ):
a__ , a__ =os.path.split(__a )
if filepath != old_path:
a__ =print_path(__a , __a )
a__ =(filepath.count(os.sep ) + 1) if filepath else 0
a__ =f"""{filepath}/{filename}""".replace(' ' , '%20' )
a__ =os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(f"""{md_prefix(__a )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 20 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
a__ =-(labels.shape[-1] * loss.item())
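        # Illustrative note: the loss is a per-token mean, so scaling by the
        # label length and negating recovers the sequence log-likelihood in
        # which the expected Mesh TensorFlow score is expressed.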
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_lowerCAmelCase: Optional[Any] = datasets.logging.get_logger(__name__)
_lowerCAmelCase: int = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_lowerCAmelCase: Tuple = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_lowerCAmelCase: List[Any] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
_lowerCAmelCase: List[Any] = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
def __UpperCamelCase ( self , lowercase_) -> int:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').')
a__ ='bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
a__ =self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
a__ =self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""")
# download the model checkpoint specified by self.config_name and set up the scorer
a__ =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
a__ =score.BleurtScorer(os.path.join(lowercase_ , lowercase_))
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =self.scorer.score(references=lowercase_ , candidates=lowercase_)
return {"scores": scores}
| 20 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_lowerCAmelCase: Any = 'src/diffusers'
# Matches is_xxx_available()
_lowerCAmelCase: Tuple = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_lowerCAmelCase: Union[str, Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
_lowerCAmelCase: Optional[Any] = '\n{0} = None\n'
_lowerCAmelCase: List[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
_lowerCAmelCase: Dict = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def _lowercase( __a : List[Any] ):
a__ =_re_backend.findall(__a )
if len(__a ) == 0:
return None
return "_and_".join(__a )
def _lowercase( ):
with open(os.path.join(__a , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ =f.readlines()
    # Get to the point where we do the actual imports for type checking
a__ =0
a__ ={}
    # Go through to the end of the file
while line_index < len(__a ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
a__ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
a__ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__a ) and len(lines[line_index] ) > 1:
a__ =lines[line_index]
a__ =_re_single_line_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__a ) > 0:
a__ =objects
else:
line_index += 1
return backend_specific_objects
def _lowercase( __a : Optional[int] , __a : Any ):
if name.isupper():
return DUMMY_CONSTANT.format(__a )
elif name.islower():
return DUMMY_FUNCTION.format(__a , __a )
else:
return DUMMY_CLASS.format(__a , __a )
def _lowercase( __a : Tuple=None ):
if backend_specific_objects is None:
a__ =read_init()
    # Special correspondence from backend name to module name, as used in the requires_<modulename> functions
a__ ={}
for backend, objects in backend_specific_objects.items():
a__ ='[' + ', '.join(f"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']'
a__ ='# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__a , __a ) for o in objects] )
a__ =dummy_file
return dummy_files
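# Illustrative sketch of a generated entry (hypothetical object name): a
# torch-only class such as "UNet2DModel" is rendered through DUMMY_CLASS
# roughly as
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
# with the backend list expanded to '["torch"]'.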
def _lowercase( __a : List[str]=False ):
a__ =create_dummy_files()
    # Special correspondence from backend name to shortcut, as used in utils/dummy_xxx_objects.py
a__ ={'torch': 'pt'}
# Locate actual dummy modules and read their content.
a__ =os.path.join(__a , 'utils' )
a__ ={
backend: os.path.join(__a , f"""dummy_{short_names.get(__a , __a )}_objects.py""" )
for backend in dummy_files.keys()
}
a__ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__a ):
with open(__a , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ =f.read()
else:
a__ =''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"""Updating diffusers.utils.dummy_{short_names.get(__a , __a )}_objects.py as the main """
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f"""diffusers.utils.dummy_{short_names.get(__a , __a )}_objects.py. Run `make fix-copies` """
'to fix this.' )
if __name__ == "__main__":
_lowerCAmelCase: str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCAmelCase: List[str] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 |
def _lowercase( __a : list[int] ):
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
a__ , a__ =numbers[j], numbers[i]
return numbers
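# Illustrative example (a sketch): each outer pass leaves the smallest
# remaining element at position i, so the sort is ascending and in place.
#   exchange_sort([5, 1, 4, 2]) -> [1, 2, 4, 5]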
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 1 |
def _lowercase( __a : int , __a : int ):
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
a__ =str(bin(__a ) )
binary_number += "0" * shift_amount
return binary_number
def _lowercase( __a : int , __a : int ):
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
a__ =str(bin(__a ) )[2:]
if shift_amount >= len(__a ):
return "0b0"
a__ =binary_number[: len(__a ) - shift_amount]
return "0b" + shifted_binary_number
def _lowercase( __a : int , __a : int ):
if number >= 0: # Get binary representation of positive number
a__ ='0' + str(bin(__a ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
a__ =len(bin(__a )[3:] ) # Find 2's complement of number
a__ =bin(abs(__a ) - (1 << binary_number_length) )[3:]
a__ =(
'1' + '0' * (binary_number_length - len(__a )) + binary_number
)
if shift_amount >= len(__a ):
return "0b" + binary_number[0] * len(__a )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__a ) - shift_amount]
)
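# Illustrative values (a sketch; the three helpers above implement logical left
# shift, logical right shift, and arithmetic right shift, in that order, and
# return binary strings rather than integers):
#   logical left shift of 5 by 3       -> "0b101000"  (value 40)
#   logical right shift of 5 by 1      -> "0b10"      (value 2)
#   arithmetic right shift of -17 by 2 -> "0b111011"  (sign bit replicated, -5)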
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 1 |
from PIL import Image
def _lowercase( __a : Image ):
a__ , a__ =image.size
a__ =0
a__ =image.load()
for i in range(__a ):
for j in range(__a ):
a__ =pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__a ):
for i in range(__a ):
a__ =255 if pixels[i, j] > mean else 0
return image
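# Illustrative note (a sketch): the loops above make two passes, first
# accumulating the global mean intensity and then binarizing every pixel
# against it; the input is expected to be a single-band ("L" mode) image,
# as in the __main__ example below.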
if __name__ == "__main__":
_lowerCAmelCase: Any = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def _lowercase( __a : Optional[int] , __a : Optional[int] ):
a__ =tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
a__ =DatasetInfosDict.from_directory(__a )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def _lowercase( __a : Tuple , __a : DatasetInfo ):
a__ =str(__a )
dataset_info.write_to_directory(__a )
a__ =DatasetInfo.from_directory(__a )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__a , 'dataset_info.json' ) )
def _lowercase( ):
a__ =DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
a__ =dataset_info._to_yaml_dict()
assert sorted(__a ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
a__ =yaml.safe_dump(__a )
a__ =yaml.safe_load(__a )
assert dataset_info_yaml_dict == reloaded
def _lowercase( ):
a__ =DatasetInfo()
a__ =dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def _lowercase( __a : Any , __a : DatasetInfosDict ):
a__ =str(__a )
dataset_infos_dict.write_to_directory(__a )
a__ =DatasetInfosDict.from_directory(__a )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
a__ =config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
a__ =DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__a , 'README.md' ) )
| 20 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase_ :
@staticmethod
def __UpperCamelCase ( *lowercase_ , **lowercase_) -> Any:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase_ (unittest.TestCase ):
snake_case =MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> Tuple:
a__ =ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_)
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Optional[int]:
a__ =object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0)
self.assertGreater(len(lowercase_) , 0)
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
'score': ANY(lowercase_),
'label': ANY(lowercase_),
'box': {'xmin': ANY(lowercase_), 'ymin': ANY(lowercase_), 'xmax': ANY(lowercase_), 'ymax': ANY(lowercase_)},
} , )
import datasets
a__ =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test')
a__ =[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
a__ =object_detector(lowercase_ , threshold=0.0)
self.assertEqual(len(lowercase_) , len(lowercase_))
for outputs in batch_outputs:
self.assertGreater(len(lowercase_) , 0)
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
'score': ANY(lowercase_),
'label': ANY(lowercase_),
'box': {'xmin': ANY(lowercase_), 'ymin': ANY(lowercase_), 'xmax': ANY(lowercase_), 'ymax': ANY(lowercase_)},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF')
def __UpperCamelCase ( self) -> str:
pass
@require_torch
def __UpperCamelCase ( self) -> Dict:
a__ ='hf-internal-testing/tiny-detr-mobilenetsv3'
a__ =AutoModelForObjectDetection.from_pretrained(lowercase_)
a__ =AutoFeatureExtractor.from_pretrained(lowercase_)
a__ =ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_)
a__ =object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0)
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
a__ =object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self) -> Optional[Any]:
a__ ='facebook/detr-resnet-50'
a__ =AutoModelForObjectDetection.from_pretrained(lowercase_)
a__ =AutoFeatureExtractor.from_pretrained(lowercase_)
a__ =ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_)
a__ =object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
a__ =object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self) -> List[Any]:
a__ ='facebook/detr-resnet-50'
a__ =pipeline('object-detection' , model=lowercase_)
a__ =object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
a__ =object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self) -> Tuple:
a__ =0.99_85
a__ ='facebook/detr-resnet-50'
a__ =pipeline('object-detection' , model=lowercase_)
a__ =object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=lowercase_)
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ ='Narsil/layoutlmv3-finetuned-funsd'
a__ =0.99_93
a__ =pipeline('object-detection' , model=lowercase_ , threshold=lowercase_)
a__ =object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
self.assertEqual(
nested_simplify(lowercase_ , decimals=4) , [
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
| 20 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def _lowercase( ):
a__ ='http://images.cocodataset.org/val2017/000000039769.jpg'
a__ =Image.open(requests.get(__a , stream=__a ).raw )
return im
def _lowercase( __a : Optional[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _lowercase( __a : int , __a : int , __a : Optional[Any] ):
a__ =dct.pop(__a )
a__ =val
def _lowercase( __a : Optional[Any] ):
a__ =[]
for k in state_dict.keys():
a__ =k
if ".pwconv" in k:
a__ =k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a__ =k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a__ =k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a__ =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a__ =k_new.split('.' )
if ls[2].isdigit():
a__ ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ =k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
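# Illustrative renames implied by the rules above (hypothetical keys, a sketch):
#   "network.0.1.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight"
#   "patch_embed.0.weight"      -> "swiftformer.patch_embed.patch_embedding.0.weight"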
@torch.no_grad()
def _lowercase( __a : Union[str, Any] , __a : int , __a : str ):
a__ =SwiftFormerConfig()
    # classification head: ImageNet-1k labels with the standard id2label mapping
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_lowerCAmelCase: List[Any] = trt.Logger(trt.Logger.WARNING)
_lowerCAmelCase: Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_lowerCAmelCase: int = logging.getLogger(__name__)
_lowerCAmelCase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
_lowerCAmelCase: Any = parser.parse_args()
if args.tokenizer_name:
_lowerCAmelCase: Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
_lowerCAmelCase: Dict = args.per_device_eval_batch_size
_lowerCAmelCase: str = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_lowerCAmelCase: Optional[int] = True
_lowerCAmelCase: Optional[int] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
_lowerCAmelCase: Tuple = 'temp_engine/bert-fp16.engine'
if args.inta:
_lowerCAmelCase: Optional[Any] = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
_lowerCAmelCase: int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_lowerCAmelCase: Optional[Any] = [network.get_input(i) for i in range(network.num_inputs)]
_lowerCAmelCase: Optional[int] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_lowerCAmelCase: Union[str, Any] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
_lowerCAmelCase: Union[str, Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_lowerCAmelCase: str = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _lowercase( __a : Tuple , __a : Optional[int] , __a : Dict , __a : Optional[Any] , __a : Tuple , __a : Union[str, Any] , __a : int , __a : Any ):
a__ =np.asarray(inputs['input_ids'] , dtype=np.intaa )
a__ =np.asarray(inputs['attention_mask'] , dtype=np.intaa )
a__ =np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __a )
# start time
a__ =time.time()
# Run inference
context.execute_async(
bindings=[int(__a ) for d_inp in d_inputs] + [int(__a ), int(__a )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__a , __a , __a )
cuda.memcpy_dtoh_async(__a , __a , __a )
# Synchronize the stream and take time
stream.synchronize()
# end time
a__ =time.time()
a__ =end_time - start_time
a__ =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_lowerCAmelCase: Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCAmelCase: str = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_lowerCAmelCase: Tuple = raw_datasets['validation'].column_names
_lowerCAmelCase: List[str] = 'question' if 'question' in column_names else column_names[0]
_lowerCAmelCase: Union[str, Any] = 'context' if 'context' in column_names else column_names[1]
_lowerCAmelCase: Union[str, Any] = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_lowerCAmelCase: Optional[Any] = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
_lowerCAmelCase: Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def _lowercase( __a : List[str] ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
a__ =[q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
a__ =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=__a , stride=args.doc_stride , return_overflowing_tokens=__a , return_offsets_mapping=__a , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
a__ =tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
a__ =[]
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
a__ =tokenized_examples.sequence_ids(__a )
a__ =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
a__ =sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set the offset_mapping entries that are not part of the context to None, so it is easy to
# determine whether a token position is part of the context or not.
a__ =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
_lowerCAmelCase: List[Any] = raw_datasets['validation']
# Validation Feature Creation
_lowerCAmelCase: Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
_lowerCAmelCase: List[str] = default_data_collator
_lowerCAmelCase: Dict = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
_lowerCAmelCase: Tuple = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowercase( __a : int , __a : Optional[int] , __a : List[Any] , __a : Optional[Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
a__ =postprocess_qa_predictions(
examples=__a , features=__a , predictions=__a , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__a , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
a__ =[
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
a__ =[{'id': k, 'prediction_text': v} for k, v in predictions.items()]
a__ =[{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__a , label_ids=__a )
_lowerCAmelCase: List[str] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# Setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _lowercase( __a : Tuple ):
return trt.volume(engine.get_binding_shape(__a ) ) * engine.get_binding_dtype(__a ).itemsize
# Allocate device memory for inputs and outputs.
_lowerCAmelCase: Optional[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
_lowerCAmelCase: Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
_lowerCAmelCase: Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
_lowerCAmelCase: Tuple = cuda.mem_alloc(h_outputa.nbytes)
_lowerCAmelCase: Any = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_lowerCAmelCase: str = cuda.Stream()
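# Page-locked (pinned) host buffers let the async copies on this stream overlap with kernel
# execution, which is why pagelocked_empty is used for the two logit output buffers above.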
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
_lowerCAmelCase: Any = 0.0
_lowerCAmelCase: str = 0
_lowerCAmelCase: Dict = timeit.default_timer()
_lowerCAmelCase: str = None
for step, batch in enumerate(eval_dataloader):
_lowerCAmelCase , _lowerCAmelCase: Optional[int] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_lowerCAmelCase , _lowerCAmelCase: str = outputs
_lowerCAmelCase: List[Any] = torch.tensor(start_logits)
_lowerCAmelCase: str = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_lowerCAmelCase: Dict = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_lowerCAmelCase: List[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_lowerCAmelCase: Optional[int] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_lowerCAmelCase: List[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_lowerCAmelCase: Union[str, Any] = nested_truncate(all_preds, len(eval_dataset))
_lowerCAmelCase: Union[str, Any] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inferences = %d', niter)
_lowerCAmelCase: Tuple = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCAmelCase: Union[str, Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 20 |
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self , lowercase_) -> None:
a__ =num_of_nodes
a__ =[]
a__ ={}
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
self.m_edges.append([u_node, v_node, weight])
def __UpperCamelCase ( self , lowercase_) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def __UpperCamelCase ( self , lowercase_) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
a__ =self.find_component(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
if component_size[u_node] <= component_size[v_node]:
a__ =v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_)
elif component_size[u_node] >= component_size[v_node]:
a__ =self.find_component(lowercase_)
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_)
def __UpperCamelCase ( self) -> None:
a__ =[]
a__ =0
a__ =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
a__ =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
a__ =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_):
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_)
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
a__ =[-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
def _lowercase( __a : int = 50 ):
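# Bottom-up DP (recurrence summarized here, hand-checked rather than taken from a source):
# ways[n] = 1 (the all-unit-cell row) plus, for every first-tile start s and tile length
# t in {2, 3, 4}, the count ways[n - s - t]; e.g. this gives ways[5] == 15.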
a__ =[1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
| 20 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCAmelCase: List[str] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def _lowercase( __a : List[Any]=None ):
if subparsers is not None:
a__ =subparsers.add_parser('tpu-config' , description=_description )
else:
a__ =argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
a__ =parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__a , default=__a , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__a , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__a , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
a__ =parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__a , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def _lowercase( __a : Tuple ):
a__ =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
a__ =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
a__ =defaults.command_file
if not args.command and defaults.commands is not None:
a__ =defaults.commands
if not args.tpu_name:
a__ =defaults.tpu_name
if not args.tpu_zone:
a__ =defaults.tpu_zone
if args.accelerate_version == "dev":
a__ ='git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
a__ ='accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __a ):
a__ =f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
a__ =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __a ):
a__ =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
a__ =['cd /usr/share']
if args.install_accelerate:
new_cmd += [f"""pip install {args.accelerate_version}"""]
new_cmd += args.command
a__ ='; '.join(__a )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
a__ =['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
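# With hypothetical values, the assembled invocation resembles:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
#     --command "cd /usr/share; pip install accelerate -U; python train.py" --worker all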
if args.debug:
print(f"""Running {' '.join(__a )}""" )
return
subprocess.run(__a )
print('Successfully setup pod.' )
def _lowercase( ):
a__ =tpu_command_parser()
a__ =parser.parse_args()
tpu_command_launcher(__a )
| 20 |
from __future__ import annotations
_lowerCAmelCase: str = '#'
class lowercase_ :
def __init__( self) -> None:
a__ ={}
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =self._trie
for char in text:
if char not in trie:
a__ ={}
a__ =trie[char]
a__ =True
def __UpperCamelCase ( self , lowercase_) -> tuple | list:
a__ =self._trie
for char in prefix:
if char in trie:
a__ =trie[char]
else:
return []
return self._elements(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> tuple:
a__ =[]
for c, v in d.items():
a__ =[' '] if c == END else [(c + s) for s in self._elements(lowercase_)]
result.extend(lowercase_)
return tuple(lowercase_)
_lowerCAmelCase: Optional[int] = Trie()
_lowerCAmelCase: List[str] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _lowercase( __a : str ):
a__ =trie.find_word(__a )
return tuple(string + word for word in suffixes )
def _lowercase( ):
print(autocomplete_using_trie('de' ) )
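# With the word list above this prints ('depart ', 'detergent ', 'deer ', 'deal ');
# each completion keeps the trailing space contributed by the END sentinel.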
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
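# Mosaic augmentation: stitch four randomly chosen labelled images into one canvas and
# remap their YOLO-style (class, x_center, y_center, width, height) boxes accordingly.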
# Parameters
_lowerCAmelCase: Optional[int] = (720, 1_280) # Height, Width
_lowerCAmelCase: Union[str, Any] = (0.4, 0.6) # if a box's height or width falls below this scale, drop it.
_lowerCAmelCase: Optional[Any] = 1 / 100
_lowerCAmelCase: str = ''
_lowerCAmelCase: Optional[Any] = ''
_lowerCAmelCase: str = ''
_lowerCAmelCase: Union[str, Any] = 250
def _lowercase( ):
a__ , a__ =get_dataset(__a , __a )
for index in range(__a ):
a__ =random.sample(range(len(__a ) ) , 4 )
a__ , a__ , a__ =update_image_and_anno(
__a , __a , __a , __a , __a , filter_scale=__a , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a__ =random_chars(32 )
a__ =path.split(os.sep )[-1].rsplit('.' , 1 )[0]
a__ =f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cv2.imwrite(f"""{file_root}.jpg""" , __a , [cv2.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
a__ =[]
for anno in new_annos:
a__ =anno[3] - anno[1]
a__ =anno[4] - anno[2]
a__ =anno[1] + width / 2
a__ =anno[2] + height / 2
a__ =f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(__a )
with open(f"""{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _lowercase( __a : str , __a : str ):
a__ =[]
a__ =[]
for label_file in glob.glob(os.path.join(__a , '*.txt' ) ):
a__ =label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__a ) as in_file:
a__ =in_file.readlines()
a__ =os.path.join(__a , f"""{label_name}.jpg""" )
a__ =[]
for obj_list in obj_lists:
a__ =obj_list.rstrip('\n' ).split(' ' )
a__ =float(obj[1] ) - float(obj[3] ) / 2
a__ =float(obj[2] ) - float(obj[4] ) / 2
a__ =float(obj[1] ) + float(obj[3] ) / 2
a__ =float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__a )
labels.append(__a )
return img_paths, labels
def _lowercase( __a : list , __a : list , __a : list[int] , __a : tuple[int, int] , __a : tuple[float, float] , __a : float = 0.0 , ):
a__ =np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
a__ =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a__ =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a__ =int(scale_x * output_size[1] )
a__ =int(scale_y * output_size[0] )
a__ =[]
a__ =[]
for i, index in enumerate(__a ):
a__ =all_img_list[index]
path_list.append(__a )
a__ =all_annos[index]
a__ =cv2.imread(__a )
if i == 0: # top-left
a__ =cv2.resize(__a , (divid_point_x, divid_point_y) )
a__ =img
for bbox in img_annos:
a__ =bbox[1] * scale_x
a__ =bbox[2] * scale_y
a__ =bbox[3] * scale_x
a__ =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a__ =cv2.resize(__a , (output_size[1] - divid_point_x, divid_point_y) )
a__ =img
for bbox in img_annos:
a__ =scale_x + bbox[1] * (1 - scale_x)
a__ =bbox[2] * scale_y
a__ =scale_x + bbox[3] * (1 - scale_x)
a__ =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a__ =cv2.resize(__a , (divid_point_x, output_size[0] - divid_point_y) )
a__ =img
for bbox in img_annos:
a__ =bbox[1] * scale_x
a__ =scale_y + bbox[2] * (1 - scale_y)
a__ =bbox[3] * scale_x
a__ =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a__ =cv2.resize(
__a , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a__ =img
for bbox in img_annos:
a__ =scale_x + bbox[1] * (1 - scale_x)
a__ =scale_y + bbox[2] * (1 - scale_y)
a__ =scale_x + bbox[3] * (1 - scale_x)
a__ =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
a__ =[
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _lowercase( __a : int ):
assert number_char > 1, "The number of characters should be greater than 1"
a__ =ascii_lowercase + digits
return "".join(random.choice(__a ) for _ in range(__a ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 20 |
_lowerCAmelCase: List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _lowercase( ):
a__ =input('Enter message: ' )
a__ =input('Enter key [alphanumeric]: ' )
a__ =input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
a__ ='encrypt'
a__ =encrypt_message(__a , __a )
elif mode.lower().startswith('d' ):
a__ ='decrypt'
a__ =decrypt_message(__a , __a )
print(f"""\n{mode.title()}ed message:""" )
print(__a )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'encrypt' )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'decrypt' )
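# Worked example (hand-computed, not from the original script): with key 'LION', encrypting
# 'Attack at dawn' yields 'Lbhnns og oika'; the key index only advances on letters, so
# spaces and other non-letters pass through unchanged.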
def _lowercase( __a : str , __a : str , __a : str ):
a__ =[]
a__ =0
a__ =key.upper()
for symbol in message:
a__ =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__a )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__a ):
a__ =0
else:
translated.append(__a )
return "".join(__a )
if __name__ == "__main__":
main()
| 20 | 1 |
def _lowercase( __a : Optional[Any] , __a : Optional[int] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
a__ =(boundary[1] - boundary[0]) / steps
a__ =boundary[0]
a__ =boundary[1]
a__ =make_points(__a , __a , __a )
a__ =0.0
y += (h / 2.0) * f(__a )
for i in x_i:
# print(i)
y += h * f(__a )
y += (h / 2.0) * f(__a )
return y
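# Sanity check for the driver in main() below: integrating f(x) = x^2 over [0, 1] with
# 10 panels gives y ~= 0.335, close to the exact value 1/3 (hand-computed estimate).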
def _lowercase( __a : Tuple , __a : str , __a : Union[str, Any] ):
a__ =a + h
while x < (b - h):
yield x
a__ =x + h
def _lowercase( __a : Dict ): # enter your function here
a__ =(x - 0) * (x - 0)
return y
def _lowercase( ):
a__ =0.0 # Lower bound of integration
a__ =1.0 # Upper bound of integration
a__ =10.0 # define number of steps or resolution
a__ =[a, b] # define boundary of integration
a__ =method_a(__a , __a )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 20 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bf16_available,
is_bnb_available,
is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fp32,
convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCAmelCase: Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='t5'
snake_case =['past_key_values']
snake_case ={'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , lowercase_=32128 , lowercase_=512 , lowercase_=64 , lowercase_=2048 , lowercase_=6 , lowercase_=None , lowercase_=8 , lowercase_=32 , lowercase_=128 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=1.0 , lowercase_="relu" , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=1 , **lowercase_ , ) -> Optional[Any]:
a__ =vocab_size
a__ =d_model
a__ =d_kv
a__ =d_ff
a__ =num_layers
a__ =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a__ =num_heads
a__ =relative_attention_num_buckets
a__ =relative_attention_max_distance
a__ =dropout_rate
a__ =layer_norm_epsilon
a__ =initializer_factor
a__ =feed_forward_proj
a__ =use_cache
a__ =self.feed_forward_proj.split('-')
a__ =act_info[-1]
a__ =act_info[0] == 'gated'
if len(lowercase_) > 1 and act_info[0] != "gated" or len(lowercase_) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a__ ='gelu_new'
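# e.g. feed_forward_proj='gated-gelu' ends up with the activation 'gelu_new' (via the
# backwards-compatibility branch above) and the gated flag set to True.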
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ , )
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
a__ ={
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
a__ ='past_encoder_sequence + sequence'
a__ ={0: 'batch'}
a__ ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a__ ={0: 'batch', 1: 'decoder_sequence'}
a__ ={0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs')
return common_inputs
@property
def __UpperCamelCase ( self) -> int:
return 13
| 20 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> Dict:
a__ ={
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
a__ ={
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowercase_) , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(lowercase_) , x.transpose()))
a__ =np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def __UpperCamelCase ( self) -> List[str]:
a__ =np.random.randn(3 , 4)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(transpose(lowercase_) , transpose(lowercase_).numpy()))
a__ =np.random.randn(3 , 4 , 5)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , transpose(lowercase_ , axes=(1, 2, 0)).numpy()))
@require_tf
def __UpperCamelCase ( self) -> List[str]:
a__ =np.random.randn(3 , 4)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(transpose(lowercase_) , transpose(lowercase_).numpy()))
a__ =np.random.randn(3 , 4 , 5)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , transpose(lowercase_ , axes=(1, 2, 0)).numpy()))
@require_flax
def __UpperCamelCase ( self) -> Optional[int]:
a__ =np.random.randn(3 , 4)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(transpose(lowercase_) , np.asarray(transpose(lowercase_))))
a__ =np.random.randn(3 , 4 , 5)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0)) , np.asarray(transpose(lowercase_ , axes=(1, 2, 0)))))
def __UpperCamelCase ( self) -> List[str]:
a__ =np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , np.reshape(lowercase_ , (4, 3))))
a__ =np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , np.reshape(lowercase_ , (12, 5))))
@require_torch
def __UpperCamelCase ( self) -> int:
a__ =np.random.randn(3 , 4)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , reshape(lowercase_ , (4, 3)).numpy()))
a__ =np.random.randn(3 , 4 , 5)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , reshape(lowercase_ , (12, 5)).numpy()))
@require_tf
def __UpperCamelCase ( self) -> Dict:
a__ =np.random.randn(3 , 4)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , reshape(lowercase_ , (4, 3)).numpy()))
a__ =np.random.randn(3 , 4 , 5)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , reshape(lowercase_ , (12, 5)).numpy()))
@require_flax
def __UpperCamelCase ( self) -> Any:
a__ =np.random.randn(3 , 4)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3)) , np.asarray(reshape(lowercase_ , (4, 3)))))
a__ =np.random.randn(3 , 4 , 5)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(reshape(lowercase_ , (12, 5)) , np.asarray(reshape(lowercase_ , (12, 5)))))
def __UpperCamelCase ( self) -> Tuple:
a__ =np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(lowercase_) , np.squeeze(lowercase_)))
a__ =np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , np.squeeze(lowercase_ , axis=2)))
@require_torch
def __UpperCamelCase ( self) -> Dict:
a__ =np.random.randn(1 , 3 , 4)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(squeeze(lowercase_) , squeeze(lowercase_).numpy()))
a__ =np.random.randn(1 , 4 , 1 , 5)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , squeeze(lowercase_ , axis=2).numpy()))
@require_tf
def __UpperCamelCase ( self) -> List[Any]:
a__ =np.random.randn(1 , 3 , 4)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(squeeze(lowercase_) , squeeze(lowercase_).numpy()))
a__ =np.random.randn(1 , 4 , 1 , 5)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , squeeze(lowercase_ , axis=2).numpy()))
@require_flax
def __UpperCamelCase ( self) -> int:
a__ =np.random.randn(1 , 3 , 4)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(squeeze(lowercase_) , np.asarray(squeeze(lowercase_))))
a__ =np.random.randn(1 , 4 , 1 , 5)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2) , np.asarray(squeeze(lowercase_ , axis=2))))
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , np.expand_dims(lowercase_ , axis=1)))
@require_torch
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =np.random.randn(3 , 4)
a__ =torch.tensor(lowercase_)
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , expand_dims(lowercase_ , axis=1).numpy()))
@require_tf
def __UpperCamelCase ( self) -> Tuple:
a__ =np.random.randn(3 , 4)
a__ =tf.constant(lowercase_)
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , expand_dims(lowercase_ , axis=1).numpy()))
@require_flax
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =np.random.randn(3 , 4)
a__ =jnp.array(lowercase_)
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1) , np.asarray(expand_dims(lowercase_ , axis=1))))
| 20 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
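# Each 0.46 x 0.46 fill inside a 0.5 x 0.5 outline stands for one block of memory in the
# CPU/GPU/model groups built below.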
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 1 |
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
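# Bellman-Ford relaxes every edge vertex_count - 1 times, i.e. O(V * E) overall; if a further
# relaxation round still shortens a path, the graph must contain a negative-weight cycle.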
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
def _lowercase( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
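# dataclasses reject mutable defaults such as [] directly; wrapping the value in a
# default_factory (as above) gives every instance its own fresh copy.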
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
            a__ =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
| 20 | 0 |
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def _A ( spanish_id ) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id , str ):
        msg = f'''Expected string as input, found {type(spanish_id ).__name__}'''
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('-' , '' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    # the checksum letter indexes the lookup table by the 8-digit number modulo 23
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
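            # heuristic default: embedding dimension of roughly half the cardinality, capped at 50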
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 20 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase__ ( TaskTemplate):
"""simple docstring"""
    task: str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
def snake_case_ ( self : List[str] , __lowerCAmelCase : int ) -> Optional[int]:
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def snake_case_ ( self : str ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
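        # reorder the arrays into the requested channel dimension layout (channels-first by default)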
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 20 | 0 |
'''simple docstring'''
from math import factorial
def solution(n : int = 20) -> int:
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    n = 2 * n
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 3 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
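# Wraps a module and mirrors its public attributes so they can be patched without mutating the original module.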
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 20 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : List[Any] = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string : str ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase = None , ):
"""simple docstring"""
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
        # create an imagenet -> id dictionary for easier use
_lowerCAmelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
_lowerCAmelCase = int(_lowercase )
_lowerCAmelCase = dict(sorted(self.labels.items() ) )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _lowercase , _lowercase = 4.0 , _lowercase = None , _lowercase = 50 , _lowercase = "pil" , _lowercase = True , ):
"""simple docstring"""
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.transformer.config.sample_size
_lowerCAmelCase = self.transformer.config.in_channels
_lowerCAmelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
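        # duplicate the latents so the conditional and unconditional passes share one batch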
_lowerCAmelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_lowerCAmelCase = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
_lowerCAmelCase = torch.tensor([1_000] * batch_size , device=self.device )
_lowerCAmelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_lowerCAmelCase = latent_model_input[: len(_lowercase ) // 2]
_lowerCAmelCase = torch.cat([half, half] , dim=0 )
_lowerCAmelCase = self.scheduler.scale_model_input(_lowercase , _lowercase )
_lowerCAmelCase = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_lowerCAmelCase = latent_model_input.device.type == """mps"""
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = torch.floataa if is_mps else torch.floataa
else:
_lowerCAmelCase = torch.intaa if is_mps else torch.intaa
_lowerCAmelCase = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_lowerCAmelCase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_lowerCAmelCase = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
_lowerCAmelCase , _lowerCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_lowerCAmelCase , _lowerCAmelCase = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
_lowerCAmelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_lowerCAmelCase = torch.cat([half_eps, half_eps] , dim=0 )
_lowerCAmelCase = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_lowerCAmelCase , _lowerCAmelCase = torch.split(_lowercase , _lowercase , dim=1 )
else:
_lowerCAmelCase = noise_pred
# compute previous image: x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
_lowerCAmelCase , _lowerCAmelCase = latent_model_input.chunk(2 , dim=0 )
else:
_lowerCAmelCase = latent_model_input
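        # undo the VAE scaling factor before decoding the latents into images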
_lowerCAmelCase = 1 / self.vae.config.scaling_factor * latents
_lowerCAmelCase = self.vae.decode(_lowercase ).sample
_lowerCAmelCase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
| 5 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
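    # move each blob into the new cache and leave a symlink behind so the old layout keeps working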
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: str , UpperCamelCase__: int , UpperCamelCase__: List[Any] ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE__ = FunnelConfig.from_json_file(UpperCamelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE__ = FunnelBaseModel(UpperCamelCase__ ) if base_model else FunnelModel(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
_lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
) | 6 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
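    # the source and target state dicts are assumed to be key-aligned, so weights are copied by position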
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 0 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a = '''sshleifer/mar_enro_6_3_student'''
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
super().setUp()
_A = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_UpperCAmelCase , )
_A = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def lowerCAmelCase_ ( self : Optional[Any] ):
MarianMTModel.from_pretrained(_UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCAmelCase_ ( self : Tuple ):
_A = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_A = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_A = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_A = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) )
_A = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_A = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_A = ['finetune.py'] + bash_script.split() + args
with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ):
_A = argparse.ArgumentParser()
_A = pl.Trainer.add_argparse_args(_UpperCAmelCase )
_A = SummarizationModule.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
_A = parser.parse_args()
_A = main(_UpperCAmelCase )
# Check metrics
_A = load_json(model.metrics_save_path )
_A = metrics['val'][0]
_A = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _UpperCAmelCase )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_A = os.listdir(_UpperCAmelCase )
_A = [x for x in contents if x.endswith('.ckpt' )][0]
_A = os.path.join(args.output_dir , _UpperCAmelCase )
_A = torch.load(_UpperCAmelCase , map_location='cpu' )
_A = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_A = {os.path.basename(_UpperCAmelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def lowerCAmelCase_ ( self : Dict ):
_A = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_A = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_A = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_A = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_A = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_A = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) )
_A = self.get_auto_remove_tmp_dir()
_A = bash_script.replace('--fp16' , '' )
_A = 6
_A = (
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ):
_A = argparse.ArgumentParser()
_A = pl.Trainer.add_argparse_args(_UpperCAmelCase )
_A = SummarizationDistiller.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
_A = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_A = distill_main(_UpperCAmelCase )
# Check metrics
_A = load_json(model.metrics_save_path )
_A = metrics['val'][0]
_A = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _UpperCAmelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
_A = os.listdir(_UpperCAmelCase )
_A = [x for x in contents if x.endswith('.ckpt' )][0]
_A = os.path.join(args.output_dir , _UpperCAmelCase )
_A = torch.load(_UpperCAmelCase , map_location='cpu' )
_A = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_A = {os.path.basename(_UpperCAmelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 7 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCAmelCase: int = logging.get_logger(__name__)
_lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_lowerCAmelCase: Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase( __a : Optional[Any] ):
a__ ={}
with open(__a , 'r' ) as file:
for line_number, line in enumerate(__a ):
a__ =line.strip()
if line:
a__ =line.split()
a__ =line_number
a__ =words[0]
a__ =value
return result
def _lowercase( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
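    # adapter parameters are renamed through PARAM_MAPPING and handled as the special "param" weight type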
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : int , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] ):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
_lowerCAmelCase: Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def _lowercase( __a : Union[str, Any] , __a : List[str] , __a : Dict ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
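
# Aside: worked example of the vocab fix-up above. fairseq dictionaries put <s> at
# index 0 and <pad> at index 1, while the CTC tokenizer expects <pad>=0 (the CTC
# blank) and <s>=1, so the two entries are swapped before vocab.json is written:
#   fairseq indices : {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, ...}
#   written to json : {'<s>': 1, '<pad>': 0, '</s>': 2, '<unk>': 3, ...}
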
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
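
# Example invocation (illustrative only; the script name and paths are placeholders):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt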
| 20 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , ):
'''simple docstring'''
__A : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__A : Tuple = parent
__A : str = batch_size
__A : Optional[int] = num_channels
__A : str = image_size
__A : List[str] = min_resolution
__A : Optional[Any] = max_resolution
__A : str = do_resize
__A : Dict = size
__A : Optional[Any] = apply_ocr
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = LayoutLMvaImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'size'))
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : int = image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase)
self.assertIsInstance(encoding.boxes , _UpperCAmelCase)
# Test batched
__A : str = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : int = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
__A : List[Any] = Image.open(ds[0]['file']).convert('RGB')
__A : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__A : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase)
self.assertListEqual(encoding.boxes , _UpperCAmelCase)
# with apply_OCR = False
__A : str = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase)
__A : List[str] = image_processing(_UpperCAmelCase , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224)) | 8 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
    def test_small_integration_test( self):
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there' , return_tensors='pt').input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
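
# Aside: the model returns a cross-entropy averaged over the label tokens, so
# multiplying `loss` by labels.shape[-1] recovers the summed negative
# log-likelihood of the target sequence; negating it yields the
# log-likelihood-style score that is compared against the published value.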
| 20 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
    return flax_key_tuple, flax_tensor
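
# Aside: a minimal illustration of the transpose above. Flax stores a Dense kernel
# as (in_features, out_features) while torch.nn.Linear stores its weight as
# (out_features, in_features), so a 2D kernel converts with a plain transpose.
# `flax_kernel_to_torch_weight` is a hypothetical helper, not part of this script.
def flax_kernel_to_torch_weight(kernel):
    """Sketch: convert a 2D Flax Dense kernel into a torch Linear weight layout."""
    return kernel.T
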
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    if "metadata" in layer:
        split_layer = layer.split('metadata' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
    else:
        split_layer = layer.split('/' )
        curr_real_layer_name = '/'.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('/' , '.' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME ):
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info , sep='/' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('/' ) ) , raw_weights )
        key = '/'.join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '.bin' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
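
# Aside: the shard budget above is counted in bytes, numel * dtype_byte_size per
# tensor. For a hypothetical (4096, 4096) bfloat16 weight that is
#   4096 * 4096 * 2 bytes = 32 MiB,
# so a '10GB' max_shard_size (parsed by convert_file_size_to_int) fits roughly
# 300 such tensors before a new shard file is started.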
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
    tokenizer = T5Tokenizer.from_pretrained('t5-small' )
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text , return_tensors='pt' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 9 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
    def setUp( self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor , tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).tokenizer

    def get_image_processor( self , **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
    def tearDown( self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
from __future__ import annotations
def pigeon_sort( array ):
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
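
# Worked example: pigeon_sort([8, 3, 2, 7, 4]) builds holes over the range 2..8,
# records {2, 3, 4, 7, 8} with their counts, and writes them back in ascending
# order, returning [2, 3, 4, 7, 8].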
| 10 |
def exchange_sort( numbers: list[int] ) -> list[int]:
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j] , numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
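
# Worked example: exchange_sort([3, 1, 2])
#   i=0 swaps 3<->1 -> [1, 3, 2]
#   i=1 swaps 3<->2 -> [1, 2, 3]
# The double loop always performs O(n^2) comparisons and sorts in place.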
| 20 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product = "laptop"):
    """simple docstring"""
    url = f'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ])
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
        try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                            - float(product_price.strip('''₹''').replace(''',''' , ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                    )
                    * 100)
            except ValueError:
                discount = float('''nan''')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame['''Current Price of the product'''] > data_frame['''MRP of the product'''],
            '''MRP of the product''',
        ] = ''' '''
        data_frame.loc[
            data_frame['''Current Price of the product'''] > data_frame['''MRP of the product'''],
            '''Discount''',
        ] = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
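
# Worked example of the discount computed above, assuming already-cleaned prices:
#   an MRP of ₹1,00,000 and a current price of ₹75,000 give
#   (100000 - 75000) / 100000 * 100 = 25.0 (percent off).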
| 11 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config( self):
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )

    def create_and_check_model( self , config , pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )

    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
    def setUp( self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCamelCase__ : str = None
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ : List[str] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase__ : Union[str, Any] = {
"""facebook/nllb-large-en-ro""": 1_0_2_4,
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
lowerCamelCase__ : Tuple = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Tuple = ['input_ids', 'attention_mask']
__lowerCAmelCase : List[Any] = NllbTokenizer
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else mask_token
lowercase__ : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , legacy_behaviour=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : int = vocab_file
lowercase__ : Any = False if not self.vocab_file else True
lowercase__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens})
lowercase__ : Optional[Any] = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase__ : int = src_lang if src_lang is not None else """eng_Latn"""
lowercase__ : Tuple = self.convert_tokens_to_ids(self._src_lang)
lowercase__ : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def lowercase__ ( self):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ : Optional[int] = [self.sep_token_id]
lowercase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
lowercase__ : Tuple = src_lang
lowercase__ : Any = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = tgt_lang_id
return inputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "eng_Latn" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "fra_Latn" , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Optional[int] = src_lang
lowercase__ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang)
def lowercase__ ( self):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
if self.legacy_behaviour:
lowercase__ : Optional[Any] = []
lowercase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ : Optional[Any] = [self.cur_lang_code]
lowercase__ : Tuple = [self.eos_token_id]
lowercase__ : int = self.convert_ids_to_tokens(self.prefix_tokens)
lowercase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens)
lowercase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
if self.legacy_behaviour:
lowercase__ : Dict = []
lowercase__ : Dict = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ : Tuple = [self.cur_lang_code]
lowercase__ : str = [self.eos_token_id]
lowercase__ : Dict = self.convert_ids_to_tokens(self.prefix_tokens)
lowercase__ : int = self.convert_ids_to_tokens(self.suffix_tokens)
lowercase__ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""")
if not os.path.isdir(SCREAMING_SNAKE_CASE_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
return
lowercase__ : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE_):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_)
return (out_vocab_file,)
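
# Aside: a minimal sketch of the two special-token layouts toggled by
# `legacy_behaviour` in set_src_lang_special_tokens above, e.g. for src_lang
# 'eng_Latn':
#   legacy  : <tokens> </s> eng_Latn   (language code appended after eos)
#   current : eng_Latn <tokens> </s>   (language code used as a prefix)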
| 12 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : str = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = 'xlm-roberta'
def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : Dict = num_attention_heads
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Optional[int] = intermediate_size
__lowerCamelCase : Optional[Any] = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : Optional[int] = position_embedding_type
__lowerCamelCase : List[Any] = use_cache
__lowerCamelCase : Union[str, Any] = classifier_dropout
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCamelCase : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowerCamelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 13 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
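
# Aside (illustrative numbers): with attention_type='block_sparse', block_size=64
# and num_random_blocks=3, each block of 64 queries attends only to its
# sliding-window neighbours, a few global blocks, and 3 random blocks, so
# attention cost grows linearly with sequence length instead of quadratically.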
| 20 | 0 |
def print_max_activities( start: list[int] , finish: list[int] ) -> None:
    """simple docstring"""
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i ,end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j ,end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
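# With this sample data the greedy scan keeps activities 0, 1, 3 and 4, so the
# script prints (assuming the fixed implementation above):
#   The following activities are selected:
#   0,1,3,4,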
| 14 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img( ):
    url ='http://images.cocodataset.org/val2017/000000039769.jpg'
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key( dct , old , new ):
    val =dct.pop(old )
    dct[new] =val
def create_rename_keys( state_dict ):
    rename_keys =[]
    for k in state_dict.keys():
        k_new =k
        if ".pwconv" in k:
            k_new =k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new =k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new =k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls =k_new.split('.' )
            if ls[2].isdigit():
                k_new ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new =k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
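

# For illustration (hypothetical key): an original checkpoint entry such as
# "network.0.1.pwconv1.weight" is renamed by the rules above to
# "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight".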
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    config =SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels =1000
    repo_id ='huggingface/label-files'
    filename ='imagenet-1k-id2label.json'
    id2label =json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label ={int(k ): v for k, v in id2label.items()}
    config.id2label =id2label
    config.label2id ={v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths =[3, 3, 6, 4]
        config.embed_dims =[48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths =[3, 3, 9, 6]
        config.embed_dims =[48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths =[4, 3, 10, 5]
        config.embed_dims =[48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths =[4, 4, 12, 6]
        config.embed_dims =[64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint =torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint =torch.load(original_ckpt , map_location='cpu' )
    state_dict =checkpoint

    rename_keys =create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load HuggingFace model
    hf_model =SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )

    # prepare test inputs
    image =prepare_img()
    processor =ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs =processor(images=image , return_tensors='pt' )

    # compare outputs from both models
    timm_logits =get_expected_output(swiftformer_name )
    hf_logits =hf_model(inputs['pixel_values'] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Tuple = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''ibert'''

    def __init__(self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
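

# Illustrative sketch (not part of the original file; values assumed): the two
# I-BERT-specific fields below are what control integer-only quantization.
if __name__ == "__main__":
    config = IBertConfig(quant_mode=True, force_dequant="none")
    print(config.quant_mode, config.force_dequant)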
| 15 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__( self , num_of_nodes) -> None:
        self.m_num_of_nodes =num_of_nodes
        self.m_edges =[]
        self.m_component ={}

    def add_edge( self , u_node , v_node , weight) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component( self , u_node) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component( self , u_node) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] =self.find_component(k)

    def union( self , component_size , u_node , v_node) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] =v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] =self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka( self) -> None:
        component_size =[]
        mst_weight =0

        minimum_weight_edge: list[Any] =[-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components =self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w =edge

                u_component =self.m_component[u]
                v_component =self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] =[u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge , list):
                    u, v, w =edge

                    u_component =self.m_component[u]
                    v_component =self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component)
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge =[-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
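

# Minimal usage sketch (assumed, not part of the original module): on this
# 4-node graph Boruvka keeps the weight-1, weight-2 and weight-3 edges, so the
# reported MST weight is 6.
def _demo_boruvka() -> None:
    graph = Graph(4)
    graph.add_edge(0, 1, 1)
    graph.add_edge(1, 2, 2)
    graph.add_edge(2, 3, 3)
    graph.add_edge(0, 3, 10)
    graph.boruvka()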
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
__A : Optional[int] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
FAIRSEQ_LANGUAGE_CODES = __A  # alias the language-code list above to the name the tokenizer expects


class NllbTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
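        # Example: spm piece "an" has spm id 3, so its fairseq id is 3 + fairseq_offset = 4.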
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang( self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self , text : str ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token : str ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token( self , index : int ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)

    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "eng_Latn" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )

    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens( self , lang : str ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 16 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
    def _info( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False):
        if rouge_types is None:
            rouge_types =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer =rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator =scoring.BootstrapAggregator()
        else:
            scores =[]

        for ref, pred in zip(references , predictions):
            score =scorer.score(ref , pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result =aggregator.aggregate()
        else:
            result ={}
            for key in scores[0]:
                result[key] =[score[key] for score in scores]

        return result
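
# Quick sketch (assumed usage; mirrors the docstring above): with
# use_aggregator=False the metric returns one Score per example instead of
# bootstrap aggregates:
#
#   rouge = datasets.load_metric('rouge')
#   out = rouge.compute(predictions=['hello there'], references=['hello there'], use_aggregator=False)
#   print(out['rouge1'][0].fmeasure)  # 1.0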
| 20 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
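
# For illustration (hypothetical key): under the mapping above, a fine-tuned
# fairseq parameter "post_extract_proj.weight" ends up as
# "hubert.feature_projection.projection.weight" in the HF state dict.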
def set_recursively( hf_pointer ,key ,value ,full_name ,weight_type ) -> str:
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer ,attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer ,weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
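
# For example (hypothetical key): key "encoder.layers.0.attention.k_proj" with
# weight_type "weight" walks the attribute chain above (ModuleList indices are
# reachable via getattr) and writes `value` into ...k_proj.weight.data.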
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : List[Any] ,a__ : str ) -> Optional[Any]:
__A : List[Any] = []
__A : str = fairseq_model.state_dict()
__A : List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Any = False
if "conv_layers" in name:
load_conv_layer(
a__ ,a__ ,a__ ,a__ ,hf_model.config.feat_extract_norm == """group""" ,)
__A : int = True
else:
for key, mapped_key in MAPPING.items():
__A : Union[str, Any] = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
__A : int = True
if "*" in mapped_key:
__A : List[Any] = name.split(a__ )[0].split(""".""" )[-2]
__A : int = mapped_key.replace("""*""" ,a__ )
if "weight_g" in name:
__A : Tuple = """weight_g"""
elif "weight_v" in name:
__A : Dict = """weight_v"""
elif "weight" in name:
__A : Any = """weight"""
elif "bias" in name:
__A : Optional[Any] = """bias"""
else:
__A : Union[str, Any] = None
set_recursively(a__ ,a__ ,a__ ,a__ ,a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ,a__ : str ,a__ : List[Any] ) -> Union[str, Any]:
__A : Any = full_name.split("""conv_layers.""" )[-1]
__A : List[str] = name.split(""".""" )
__A : Optional[int] = int(items[0] )
__A : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__A : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__A : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__A : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__A : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Tuple ,a__ : Dict=None ,a__ : str=None ,a__ : Optional[Any]=True ) -> int:
if config_path is not None:
__A : str = HubertConfig.from_pretrained(a__ )
else:
__A : List[str] = HubertConfig()
if is_finetuned:
if dict_path:
__A : Tuple = Dictionary.load(a__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__A : List[str] = target_dict.pad_index
__A : int = target_dict.bos_index
__A : List[str] = target_dict.eos_index
__A : Optional[int] = len(target_dict.symbols )
__A : str = os.path.join(a__ ,"""vocab.json""" )
if not os.path.isdir(a__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(a__ ) )
return
os.makedirs(a__ ,exist_ok=a__ )
with open(a__ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices ,a__ )
__A : int = WavaVecaCTCTokenizer(
a__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=a__ ,)
__A : Dict = True if config.feat_extract_norm == """layer""" else False
__A : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=a__ ,return_attention_mask=a__ ,)
__A : Optional[Any] = WavaVecaProcessor(feature_extractor=a__ ,tokenizer=a__ )
processor.save_pretrained(a__ )
__A : Dict = HubertForCTC(a__ )
else:
__A : Optional[Any] = HubertModel(a__ )
if is_finetuned:
__A , __A , __A : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__A , __A , __A : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__A : int = model[0].eval()
recursively_load_weights(a__ ,a__ ,a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 17 |
from __future__ import annotations
END = '#'


class Trie:
    def __init__( self) -> None:
        self._trie: dict = {}

    def insert_word( self , text) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word( self , prefix) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements( self , d) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie( string : str ):
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )


def main( ):
    print(autocomplete_using_trie('de' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
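    # With the word list above, autocomplete_using_trie('de') yields
    # ('depart ', 'detergent ', 'deer ', 'deal '); each suffix is terminated
    # by the space that stands in for the END marker.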
| 20 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ) -> dict:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin ,unittest.TestCase ):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp( self ):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def _snake_case ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def _snake_case ( self ) -> str:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase = [None, 16, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = [np.sum(_lowerCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = ["longest", "max_length", "do_not_pad"]
_lowerCAmelCase = [None, 16, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = [np.sum(_lowerCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding="max_length" , max_length=4 , truncation=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding="longest" , max_length=4 , truncation=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase = feature_extractor(
_lowerCAmelCase , padding="longest" , max_length=16 , truncation=_lowerCAmelCase , return_tensors="np" , return_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = inputs.input_features
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def _snake_case ( self ) -> List[str]:
import torch
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
from datasets import load_dataset
_lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_lowerCAmelCase = ds.sort("id" ).select(range(_lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _snake_case ( self ) -> Any:
# fmt: off
_lowerCAmelCase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
_lowerCAmelCase = self._load_datasamples(1 )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = feature_extractor(_lowerCAmelCase , return_tensors="pt" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) )
| 18 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main():
    message =input('Enter message: ' )
    key =input('Enter key [alphanumeric]: ' )
    mode =input('Encrypt/Decrypt [e/d]: ' )

    if mode.lower().startswith('e' ):
        mode ='encrypt'
        translated =encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        mode ='decrypt'
        translated =decrypt_message(key , message )

    print(f"""\n{mode.title()}ed message:""" )
    print(translated )


def encrypt_message( key : str , message : str ):
    return translate_message(key , message , 'encrypt' )


def decrypt_message( key : str , message : str ):
    return translate_message(key , message , 'decrypt' )


def translate_message( key : str , message : str , mode : str ):
    translated =[]
    key_index =0
    key =key.upper()

    for symbol in message:
        num =LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )

            num %= len(LETTERS )

            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )

            key_index += 1
            if key_index == len(key ):
                key_index =0

        else:
            translated.append(symbol )

    return "".join(translated )
if __name__ == "__main__":
main()
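    # Worked example (assuming the fixed implementation above): encrypting
    # "HELLO" with key "KEY" shifts letters by the repeating key K,E,Y and
    # yields "RIJVS"; decrypting "RIJVS" with "KEY" returns "HELLO".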
| 20 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = BertTokenizer
lowercase__ = BertTokenizerFast
lowercase__ = True
lowercase__ = True
lowercase__ = filter_non_english
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
super().setUp()
_UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
_UpperCamelCase = '''UNwant\u00E9d,running'''
_UpperCamelCase = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.tokenizer_class(self.vocab_file)
_UpperCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''')
self.assertListEqual(__a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , [9, 6, 7, 12, 10, 11])
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''UNwant\u00E9d,running'''
_UpperCamelCase = tokenizer.tokenize(__a)
_UpperCamelCase = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a)
_UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(__a)
_UpperCamelCase = rust_tokenizer.encode(__a)
self.assertListEqual(__a , __a)
# With lower casing
_UpperCamelCase = self.get_tokenizer(do_lower_case=__a)
_UpperCamelCase = self.get_rust_tokenizer(do_lower_case=__a)
_UpperCamelCase = '''UNwant\u00E9d,running'''
_UpperCamelCase = tokenizer.tokenize(__a)
_UpperCamelCase = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a)
_UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(__a)
_UpperCamelCase = rust_tokenizer.encode(__a)
self.assertListEqual(__a , __a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer()
_UpperCamelCase = '''a\n\'ll !!to?\'d of, can\'t.'''
_UpperCamelCase = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(__a) , __a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_UpperCamelCase = {}
for i, token in enumerate(__a):
_UpperCamelCase = i
_UpperCamelCase = WordpieceTokenizer(vocab=__a , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing'''])
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''bert-base-uncased''')
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
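        # 101 and 102 are the [CLS] and [SEP] token ids in the bert-base-uncased vocab.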
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a)
_UpperCamelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase = tokenizer_r.do_lower_case if hasattr(__a , '''do_lower_case''') else False
_UpperCamelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''])
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char)
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens)
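

# --- Illustrative sketch (added; not part of the original test class) ---
# `return_offsets_mapping=True` is what the offsets test above exercises: each
# wordpiece comes back with the (start, end) character span it covers in the raw
# string. Assumes a fast (Rust-backed) tokenizer and network access for the weights.
def _demo_offset_mapping():
    from transformers import BertTokenizerFast

    tok = BertTokenizerFast.from_pretrained('''bert-base-uncased''')
    enc = tok.encode_plus('''A naive sentence.''' , return_offsets_mapping=True)
    for token, span in zip(tok.convert_ids_to_tokens(enc['''input_ids''']) , enc['''offset_mapping''']):
        print(token , span)  # e.g. ('naive', (2, 7)): the span indexes the input string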
| 19 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
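
# Usage sketch (added, kept as comments since this module is the package's own
# `utils` namespace): a few of the helpers re-exported above.
#
#   from accelerate.utils import parse_flag_from_env, set_seed, wait_for_everyone
#   debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE", default=False)  # env flag -> bool
#   set_seed(42)          # seeds python, numpy and torch RNGs in one call
#   wait_for_everyone()   # barrier across all distributed processes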
| 20 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase ):
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self ):
'''simple docstring'''
return 12
@property
    def num_embeds_ada_norm(self ):
'''simple docstring'''
return 12
@property
    def text_embedder_hidden_size(self ):
'''simple docstring'''
return 32
@property
    def dummy_vqvae(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
@property
    def dummy_tokenizer(self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
@property
    def dummy_text_encoder(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
@property
    def dummy_transformer(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 32,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 32,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = TransformeraDModel(**model_kwargs )
        return model
    def test_vq_diffusion(self ):
        '''simple docstring'''
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling(self ):
        '''simple docstring'''
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self ):
        '''simple docstring'''
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
        pipeline = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        assert np.abs(expected_image - image ).max() < 2.0
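

# --- Usage sketch (added; mirrors the slow test above) ---
# End-to-end generation with the public checkpoint. Assumes a CUDA device and
# that the `microsoft/vq-diffusion-ithq` weights can be downloaded.
def _demo_vq_diffusion():
    pipe = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
    pipe = pipe.to("""cuda""" )
    generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
    image = pipe("""teddy bear playing in the pool""" , generator=generator ).images[0]
    image.save("""teddy_bear.png""" )  # default output_type is PIL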
| 21 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : int = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig ):
    model_type = 'dpt'

    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=3_84 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 1_92, 3_84, 7_68] , fusion_hidden_size=2_56 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=2_55 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 10_24, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self ) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
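

# Quick usage note (added): with the defaults above, `DPTConfig()` is a plain
# ViT-style config, while `DPTConfig(is_hybrid=True)` auto-builds a `BitConfig`
# backbone when none is supplied.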
| 22 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
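

# Rendering note (added): with manim installed, a scene like the one above is
# rendered from the command line; file name and quality flag below are illustrative:
#     manim -pql stage.py <SceneClassName>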
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tapas"""] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_tapas"""] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
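
# Usage sketch (added): the `_LazyModule` registered above defers heavy imports,
# so attribute access triggers them on demand, e.g.:
#   from transformers.models.tapas import TapasConfig                # cheap, config only
#   from transformers.models.tapas import TapasForQuestionAnswering  # pulls in torch code lazily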
| 23 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
def _lowercase( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
            a__ =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
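

# --- Usage sketch (added): the pattern exercised by the tests above ---
# A dataclass doubles as a typed CLI schema; HfArgumentParser turns its fields
# into argparse flags. The dataclass below is illustrative only.
@dataclass
class _DemoArguments:
    foo: int = 1
    baz: str = field(default='toto' , metadata={'help': 'help message'})


def _demo_parse():
    parser = HfArgumentParser(_DemoArguments)
    (demo_args,) = parser.parse_args_into_dataclasses(['--foo', '3'] , look_for_args_file=False)
    assert demo_args.foo == 3 and demo_args.baz == 'toto'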
| 20 | 0 |
'''simple docstring'''
def kinetic_energy(mass: float , velocity: float ) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
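

# Worked example (added): KE = 0.5 * m * v**2, so 10 kg at 5 m/s carries
# 0.5 * 10 * 25 = 125 J.
assert kinetic_energy(10 , 5 ) == 125.0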
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 24 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
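
# Usage sketch (added, as comments): instantiating a small config with the keyword
# names from the signature above. `AutoformerConfig` is this class's name in the
# transformers library (assumption here, since the class keeps an obfuscated name);
# the values are illustrative.
#   config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   print(config.d_model, config._number_of_features)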
| 20 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        '''simple docstring'''

        def __init__( self , metric_id ):
            """simple docstring"""
            self.id = metric_id

    class HfhMock:
        '''simple docstring'''

        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics( self ):
            """simple docstring"""
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock())


@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate"):
        func(*args)
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
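
# Usage sketch (added, as comments; the class above keeps the document's obfuscated
# names, so the attribute names below are assumptions): the preprocess flow turns
# PIL images into a BatchFeature of resized, cropped, normalized pixel tensors.
#   import numpy as np
#   from PIL import Image
#   processor = <ImageProcessor class defined above>()
#   batch = processor.preprocess(Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8)), return_tensors='pt')
#   print(batch['pixel_values'].shape)   # -> torch.Size([1, 3, 224, 224])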
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: complex , b: complex , c: complex ) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""" )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
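

# Worked example (added): for x**2 - 1 = 0, delta = 0 - 4*1*(-1) = 4 and the real
# roots are returned as plain floats.
assert quadratic_roots(a=1 , b=0 , c=-1 ) == (1.0, -1.0)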
def main() -> None:
    """simple docstring"""
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'''The solutions are: {solution_1} and {solution_2}''' )
if __name__ == "__main__":
main()
| 26 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
                elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
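
# Usage sketch (added, as comments; the patcher above is datasets'
# `patch_submodule`, kept here under the document's obfuscated class name):
# temporarily swap an attribute a module imported, then restore it on exit.
#   import some_module                      # illustrative target module
#   patcher = lowercase_(some_module, "os.path.join", lambda *p: "/patched")
#   with patcher:
#       ...                                 # some_module now sees the patched join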
| 20 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
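# A minimal sketch of how this generated module is typically used; the import
# path and the local `spiece.model` file below are assumptions for illustration:
#
#   from transformers.utils import sentencepiece_model_pb2
#
#   model = sentencepiece_model_pb2.ModelProto()
#   with open("spiece.model", "rb") as f:
#       model.ParseFromString(f.read())
#   print(model.trainer_spec.model_type, len(model.pieces))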
| 27 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
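# A hedged usage sketch: the flags are the ones defined by the parser above, but
# the script name and file paths are hypothetical placeholders:
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors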
| 20 | 0 |
"""Convert FLAVA checkpoints from the original repository to the Hugging Face format."""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # sum all parameters, skipping the embedding tables as in the original FLAVA script
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename the original FLAVA checkpoint keys to the Hugging Face layout."""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions', 'mmm_image_head')
        key = key.replace('heads.cmd.mlm_head.cls.predictions', 'mmm_text_head')
        key = key.replace('heads.cmd.itm_head.cls', 'itm_head')
        key = key.replace('heads.cmd.itm_head.pooler', 'itm_head.pooler')
        key = key.replace('heads.cmd.clip_head.logit_scale', 'flava.logit_scale')
        key = key.replace('heads.fairseq_mlm.cls.predictions', 'mlm_head')
        key = key.replace('heads.imagenet.mim_head.cls.predictions', 'mim_head')
        key = key.replace('mm_text_projection', 'flava.text_to_mm_projection')
        key = key.replace('mm_image_projection', 'flava.image_to_mm_projection')
        key = key.replace('image_encoder.module', 'flava.image_model')
        key = key.replace('text_encoder.module', 'flava.text_model')
        key = key.replace('mm_encoder.module.encoder.cls_token', 'flava.multimodal_model.cls_token')
        key = key.replace('mm_encoder.module', 'flava.multimodal_model')
        key = key.replace('text_projection', 'flava.text_projection')
        key = key.replace('image_projection', 'flava.image_projection')
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'image_codebook.{key}'] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original model weights into the transformers design."""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location='cpu')
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='cpu')
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
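# A hedged usage sketch (the script name and paths are hypothetical placeholders;
# the flags are defined by the parser above):
#
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava_model.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full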
| 28 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running environment."""
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
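# With torch installed, the resulting string looks something like this (version
# numbers and the session id below are illustrative, not fixed values):
#   "diffusers/0.18.0; python/3.10.12; session_id/1f2e3d4c...; torch/2.0.1"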
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def create_model_card(args, model_name):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
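# Example with a hypothetical cache path and a dummy 40-hex-digit hash:
#   extract_commit_hash(
#       "~/.cache/huggingface/diffusers/models--foo--bar/snapshots/"
#       "0123456789abcdef0123456789abcdef01234567/unet/config.json"
#   )  # -> "0123456789abcdef0123456789abcdef01234567"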
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
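# Example, derived directly from the splitting logic above:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   # -> "diffusion_pytorch_model.fp16.bin"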
def _get_model_file(pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
        model_file = hf_hub_download(
            pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 29 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
print(f"""Converting {name}...""" )
with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
    x = torch.randn((2, 3, 224, 224))
    og_outputs = from_model(x)
    outputs = our_model(x).logits
    assert torch.allclose(og_outputs, outputs), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
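# A hedged usage sketch (the script name is a placeholder; the flags are defined
# by the parser above):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub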
| 20 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1_024, 2_048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types)}''')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'''Padding strategy {global_padding} not supported''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 30 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
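# Expected input format: one label per line; the first whitespace-separated
# token on each line becomes the value and the line number the key. For example,
# a file with the two lines "happy" and "sad" yields {0: 'happy', 1: 'sad'}.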
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict(key, hf_dict, value, full_name, weight_type):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(key, hf_dict, value, name, weight_type)
            else:
                set_recursively(hf_model, key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
    """Copy/paste/tweak the fairseq model weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
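# A hedged usage sketch for a fine-tuned CTC checkpoint (the script name and
# paths are hypothetical placeholders; the flags are defined by the parser above):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h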
| 20 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of the decoding method."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True, ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default', attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version('>=', '1.11.0'):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group", ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == 'spatial' else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default' if norm_type == 'group' else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version('>=', '1.11.0'):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps each continuous latent vector to its nearest codebook entry."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer('used', torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                F"Remapping {self.n_e} indices to {self.re_embed} indices. "
                F"Using {self.unknown_index} for unknown indices." )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
SCREAMING_SNAKE_CASE_ = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE_ = self.unmap_to_all(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE_ = self.embedding(_lowerCAmelCase )
if shape is not None:
SCREAMING_SNAKE_CASE_ = z_q.view(_lowerCAmelCase )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=False ):
SCREAMING_SNAKE_CASE_ = parameters
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.chunk(_lowerCAmelCase , 2 , dim=1 )
SCREAMING_SNAKE_CASE_ = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE_ = deterministic
SCREAMING_SNAKE_CASE_ = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE_ = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
SCREAMING_SNAKE_CASE_ = randn_tensor(
self.mean.shape , generator=_lowerCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE_ = self.mean + self.std * sample
return x
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Union[str, Any]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
return self.mean | 31 |
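# The VectorQuantizer above picks each latent's nearest codebook entry with a
# torch.cdist argmin and keeps encoder gradients flowing via the
# straight-through trick `z + (z_q - z).detach()`. A minimal standalone sketch
# of that step; the sizes (8 codes of dim 4, 16 latents) are illustrative
# assumptions, not values taken from the model above.
import torch
import torch.nn as nn

codebook = nn.Embedding(8, 4)                      # 8 code vectors of dim 4
z = torch.randn(16, 4, requires_grad=True)         # flattened latents

indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)                            # nearest codebook entries
commit_loss = torch.mean((z_q.detach() - z) ** 2)  # pulls z toward its codes
z_q = z + (z_q - z).detach()                       # forward: z_q; backward: identity in z

commit_loss.backward()
assert z.grad is not None                          # the encoder side still trains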
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
a__ =-(labels.shape[-1] * loss.item())
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 0 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> tuple[float, list[float]]:
"""simple docstring"""
_UpperCAmelCase = list(range(len(SCREAMING_SNAKE_CASE_ ) ) )
_UpperCAmelCase = [v / w for v, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] * len(SCREAMING_SNAKE_CASE_ )
for i in index:
if weight[i] <= capacity:
_UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
_UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
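# Worked example for the fractional-knapsack routine above: items are taken
# greedily in decreasing value/weight ratio, and only the last item picked is
# split. The numbers are illustrative: ratios are [4.0, 3.0], so capacity 3
# takes all of item 0 and half of item 1 for a best value of 11.0.
value, weight, capacity = [8, 6], [2, 2], 3
order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
max_value, fractions = 0.0, [0.0] * len(value)
for i in order:
    take = min(weight[i], capacity)  # whole item if it fits, else the remainder
    fractions[i] = take / weight[i]
    max_value += value[i] * fractions[i]
    capacity -= take
    if capacity == 0:
        break
assert max_value == 11.0 and fractions == [1.0, 0.5]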
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = StableUnCLIPPipeline
__lowercase : Any = TEXT_TO_IMAGE_PARAMS
__lowercase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowercase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 32
snake_case__ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
snake_case__ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=_a , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
snake_case__ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_a , num_layers=1 , )
torch.manual_seed(0 )
snake_case__ = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_a , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
snake_case__ = StableUnCLIPImageNormalizer(embedding_dim=_a )
snake_case__ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
snake_case__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_a , layers_per_block=1 , upcast_attention=_a , use_linear_projection=_a , )
torch.manual_seed(0 )
snake_case__ = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_a , steps_offset=1 , )
torch.manual_seed(0 )
snake_case__ = AutoencoderKL()
snake_case__ = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Tuple , _a:Any=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_a )
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
snake_case__ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case__ = pipe('''anime turle''' , generator=_a , output_type='''np''' )
snake_case__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
snake_case__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
snake_case__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 33 |
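# The pipeline tests above pin randomness with torch.Generator(...).manual_seed
# so the same initial latents are drawn on every run. A minimal check of that
# reproducibility pattern, independent of any model:
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(2, 3, generator=gen_a), torch.randn(2, 3, generator=gen_b))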
def _lowercase( __a : list[int] ):
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
a__ , a__ =numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
UpperCamelCase = len(lowerCamelCase_) - 1
def UpperCAmelCase__ ( self , lowerCamelCase_) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCamelCase = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree , lowerCamelCase_) * ((1 - t) ** (self.degree - i)) * (t**i))
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowerCamelCase_) , 5) == 1
return output_values
def UpperCAmelCase__ ( self , lowerCamelCase_) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
UpperCamelCase = self.basis_function(lowerCamelCase_)
UpperCamelCase = 0.0
UpperCamelCase = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def UpperCAmelCase__ ( self , lowerCamelCase_ = 0.01) -> List[Any]:
from matplotlib import pyplot as plt # type: ignore
UpperCamelCase = [] # x coordinates of points to plot
UpperCamelCase = [] # y coordinates of points to plot
UpperCamelCase = 0.0
while t <= 1:
UpperCamelCase = self.bezier_curve_function(lowerCamelCase_)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
UpperCamelCase = [i[0] for i in self.list_of_points]
UpperCamelCase = [i[1] for i in self.list_of_points]
plt.plot(
lowerCamelCase_ , lowerCamelCase_ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree) , )
plt.scatter(lowerCamelCase_ , lowerCamelCase_ , color='''red''' , label='''Control Points''')
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 34 |
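# Worked example for the Bezier evaluation above. For the degree-2 control
# points (0, 0), (5, 5), (5, 0), the Bernstein basis at t = 0.5 is
# [0.25, 0.5, 0.25]; it sums to 1 and weights the control points into the
# curve point (3.75, 2.5).
from math import comb

def bezier_point(points, t):
    n = len(points) - 1
    basis = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    x = sum(b * p[0] for b, p in zip(basis, points))
    y = sum(b * p[1] for b, p in zip(basis, points))
    return (x, y)

assert bezier_point([(0, 0), (5, 5), (5, 0)], 0.5) == (3.75, 2.5)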
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 0 |
from heapq import heappop, heappush
import numpy as np
def a ( A__ , A__ , A__ , A__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = grid.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [-1, 1, 0, 0]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = [(0, source)], set()
SCREAMING_SNAKE_CASE__ : Optional[int] = np.full((rows, cols) , np.inf )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Any = np.empty((rows, cols) , dtype=A__ )
SCREAMING_SNAKE_CASE__ : List[str] = None
while queue:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Any = heappop(A__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
SCREAMING_SNAKE_CASE__ : Any = []
while (x, y) != source:
path.append((x, y) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = predecessors[x, y]
path.append(A__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(A__ ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(A__ , (dist + 1, (nx, ny)) )
SCREAMING_SNAKE_CASE__ : List[Any] = dist + 1
SCREAMING_SNAKE_CASE__ : List[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
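# The routine above is Dijkstra on a unit-cost grid, which reduces to BFS:
# every edge costs 1, so the heap pops cells in nondecreasing distance. A
# self-contained check with 4-way moves on a 3x3 grid whose center is blocked:
from heapq import heappop, heappush

def grid_distance(grid, source, destination):
    rows, cols = len(grid), len(grid[0])
    dist = {source: 0}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            return d
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                if d + 1 < dist.get((nx, ny), float("inf")):
                    dist[nx, ny] = d + 1
                    heappush(queue, (d + 1, (nx, ny)))
    return float("inf")

grid = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]  # 0 marks an obstacle
assert grid_distance(grid, (0, 0), (2, 2)) == 4  # detour around the center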
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : str = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowercase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36 |
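# Both __init__ modules above defer heavy imports through a _LazyModule: the
# import structure maps submodules to their public names, and nothing is
# imported until an attribute is actually touched. The same effect in plain
# Python uses a module-level __getattr__ (PEP 562). A sketch with hypothetical
# submodule names; it assumes this file lives inside a package.
import importlib

_import_structure = {"configuration_foo": ["FooConfig"], "modeling_foo": ["FooModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # invoked only when `name` is not already in the module
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")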
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 20 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = XGLMTokenizer
_lowercase = XGLMTokenizerFast
_lowercase = True
_lowercase = True
def _UpperCamelCase( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase( self : List[Any] ):
a__ : int = "<pad>"
a__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(lowerCamelCase__ ) , 1_008 )
def _UpperCamelCase( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def _UpperCamelCase( self : Optional[int] ):
a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
a__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _UpperCamelCase( self : Dict ):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ )
a__ : List[str] = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
if not self.test_rust_tokenizer:
return
a__ : Any = self.get_tokenizer()
a__ : Optional[Any] = self.get_rust_tokenizer()
a__ : Tuple = "I was born in 92000, and this is falsé."
a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : Tuple = tokenizer.encode(lowerCamelCase__ )
a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : List[str] ):
a__ : Union[str, Any] = "Hello World!"
a__ : List[str] = [2, 31_227, 4_447, 35]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _UpperCamelCase( self : List[Any] ):
# fmt: off
a__ : Optional[int] = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
| 37 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def _lowercase( ):
a__ ='http://images.cocodataset.org/val2017/000000039769.jpg'
a__ =Image.open(requests.get(__a , stream=__a ).raw )
return im
def _lowercase( __a : Optional[Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _lowercase( __a : int , __a : int , __a : Optional[Any] ):
a__ =dct.pop(__a )
a__ =val
def _lowercase( __a : Optional[Any] ):
a__ =[]
for k in state_dict.keys():
a__ =k
if ".pwconv" in k:
a__ =k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a__ =k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a__ =k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a__ =k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a__ =k_new.split('.' )
if ls[2].isdigit():
a__ ='swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ =k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _lowercase( __a : Union[str, Any] , __a : int , __a : str ):
a__ =SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ =1000
a__ ='huggingface/label-files'
a__ ='imagenet-1k-id2label.json'
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ =[3, 3, 6, 4]
a__ =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a__ =[3, 3, 9, 6]
a__ =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a__ =[4, 3, 10, 5]
a__ =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a__ =[4, 4, 12, 6]
a__ =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ =torch.hub.load_state_dict_from_url(__a , map_location='cpu' , check_hash=__a )
else:
a__ =torch.load(__a , map_location='cpu' )
a__ =checkpoint
a__ =create_rename_keys(__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a , __a , __a )
# load HuggingFace model
a__ =SwiftFormerForImageClassification(__a ).eval()
hf_model.load_state_dict(__a )
# prepare test inputs
a__ =prepare_img()
a__ =ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ =processor(images=__a , return_tensors='pt' )
# compare outputs from both models
a__ =get_expected_output(__a )
a__ =hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __a , atol=1e-3 )
Path(__a ).mkdir(exist_ok=__a )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
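# The conversion script above ports a checkpoint by building (old, new) key
# pairs and popping/reinserting entries into the state dict. The same pattern
# on a plain dict, with hypothetical key names standing in for real weights:
state_dict = {"patch_embed.weight": 1, "network.0.2.fc.weight": 2}

def rename(key):
    key = key.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
    if key.startswith("network."):
        key = key.replace("network", "swiftformer.encoder.network", 1)
    return key

for old_key in list(state_dict):  # snapshot the keys: we mutate while renaming
    state_dict[rename(old_key)] = state_dict.pop(old_key)

assert "swiftformer.patch_embed.patch_embedding.weight" in state_dict
assert "swiftformer.encoder.network.0.2.fc.weight" in state_dict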
'''simple docstring'''
import os
from math import logaa
def UpperCamelCase__ ( __magic_name__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
snake_case__ : float = 0
snake_case__ : Union[str, Any] = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , __magic_name__ ) ) ):
snake_case__ , snake_case__ : Tuple = list(map(__magic_name__ , line.split(""",""" ) ) )
if x * logaa(__magic_name__ ) > largest:
snake_case__ : Optional[Any] = x * logaa(__magic_name__ )
snake_case__ : int = i + 1
return result
if __name__ == "__main__":
print(solution())
| 38 |
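# The Project Euler routine above compares huge powers a**b without ever
# computing them: since log10 is strictly increasing, a**b > c**d exactly when
# b*log10(a) > d*log10(c). A near-tie sanity check (2**11 = 2048 vs 3**7 = 2187):
from math import log10

assert (11 * log10(2) > 7 * log10(3)) == (2**11 > 3**7)
assert 2**11 < 3**7  # the log comparison correctly picks 3**7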
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self , lowercase_) -> None:
a__ =num_of_nodes
a__ =[]
a__ ={}
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
self.m_edges.append([u_node, v_node, weight])
def __UpperCamelCase ( self , lowercase_) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def __UpperCamelCase ( self , lowercase_) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
a__ =self.find_component(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_) -> None:
if component_size[u_node] <= component_size[v_node]:
a__ =v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_)
elif component_size[u_node] >= component_size[v_node]:
a__ =self.find_component(lowercase_)
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_)
def __UpperCamelCase ( self) -> None:
a__ =[]
a__ =0
a__ =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
a__ =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
a__ =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_):
a__ , a__ , a__ =edge
a__ =self.m_component[u]
a__ =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_)
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
num_of_components -= 1
a__ =[-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 100 , ):
snake_case_ = x_start
snake_case_ = fnc(SCREAMING_SNAKE_CASE__ )
snake_case_ = 0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case_ = (x_end - x_start) / steps + xa
snake_case_ = fnc(SCREAMING_SNAKE_CASE__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case_ = xa
snake_case_ = fxa
return length
if __name__ == "__main__":
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
lowerCAmelCase_ = 10
while i <= 10_00_00:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10 | 39 |
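# The line_length routine above approximates arc length by summing chord
# lengths with math.hypot, so the estimate converges from below as steps grow.
# For f(x) = x**2 on [0, 1] the exact length is (2*sqrt(5) + asinh(2)) / 4,
# roughly 1.4789, which a fine subdivision should match closely:
import math

def approx_length(f, a, b, steps):
    xs = [a + (b - a) * i / steps for i in range(steps + 1)]
    return sum(math.hypot(xs[i + 1] - xs[i], f(xs[i + 1]) - f(xs[i])) for i in range(steps))

exact = (2 * math.sqrt(5) + math.asinh(2)) / 4
assert abs(approx_length(lambda x: x * x, 0.0, 1.0, 10_000) - exact) < 1e-4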
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
| 20 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase_ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
super().__init__(features=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, (str, bytes, type(SCREAMING_SNAKE_CASE_ )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCamelCase : str = {}
if isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCamelCase : List[str] = {'dtype': torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCamelCase : int = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_, PIL.Image.Image ):
UpperCamelCase : str = np.asarray(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_, **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
UpperCamelCase : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "torch.Tensor":
UpperCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : str = self._consolidate(batch[column_name] )
return batch
| 40 |
from __future__ import annotations

END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
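# Expected result for the word list above (the trailing space marks a complete word):
#   autocomplete_using_trie('de') -> ('depart ', 'detergent ', 'deer ', 'deal ')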
| 20 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """simple docstring"""
    assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()

masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
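# fill_mask returns a list of (filled_sentence, probability, predicted_token) triples, so the
# line above prints the top-3 completions for the masked CamemBERT sentence.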
| 41 |
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    message = input('Enter message: ' )
    key = input('Enter key [alphanumeric]: ' )
    mode = input('Encrypt/Decrypt [e/d]: ' )

    if mode.lower().startswith('e' ):
        mode = 'encrypt'
        translated = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        mode = 'decrypt'
        translated = decrypt_message(key , message )

    print(f"""\n{mode.title()}ed message:""" )
    print(translated )


def encrypt_message(key: str , message: str ):
    return translate_message(key , message , 'encrypt' )


def decrypt_message(key: str , message: str ):
    return translate_message(key , message , 'decrypt' )


def translate_message(key: str , message: str , mode: str ):
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )

            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )

            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )


if __name__ == "__main__":
    main()
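# Quick sanity check (hypothetical, not part of the original script):
#   encrypt_message('LEMON', 'ATTACKATDAWN')   # -> 'LXFOPVEFRNHR'
#   decrypt_message('LEMON', 'LXFOPVEFRNHR')   # -> 'ATTACKATDAWN'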
| 20 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        '''simple docstring'''
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        '''simple docstring'''
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
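# The two integration tests above pin exact logit/hidden-state slices from the public
# facebook/esm2_t6_8M_UR50D checkpoint to guard the port against numerical regressions.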
| 42 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
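# Note: the DeepSpeed engine/optimizer/scheduler wrappers and the Dummy* placeholders above are
# only importable when deepspeed itself is installed (guarded by is_deepspeed_available()).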
| 20 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0003
PYTHON_CODE = 5_0002
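# EN_CODE / PYTHON_CODE are the fairseq ids of the '__en_XX__' and '__python__' language codes
# in the base PLBart vocabulary (see the fairseq_tokens_to_ids assertions further down).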
@require_sentencepiece
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = PLBartTokenizer
_lowercase : Optional[Any] = None
_lowercase : int = False
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: int ) -> Tuple:
"""simple docstring"""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
lowercase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase__ = tokenizer.vocab_size
lowercase__ = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 4 , UpperCamelCase_ )]
self.assertListEqual(UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowercase__ = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase__ = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''multi''' , keep_accents=True )
lowercase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase__ = tokenizer.vocab_size
lowercase__ = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 7 , UpperCamelCase_ )]
self.assertListEqual(
UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowercase__ = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase__ = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
    checkpoint_name = '''uclanlp/plbart-python-en_XX'''
    src_text = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    tgt_text = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCamelCase_ ( cls: List[str] ) -> Optional[int]:
"""simple docstring"""
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
        cls.pad_token_id = 1
return cls
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
def lowerCamelCase_ ( self: Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowercase__ = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase__ = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowercase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , UpperCamelCase_ )
lowercase__ = 10
lowercase__ = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
lowercase__ = PLBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowercase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , UpperCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase_ ( self: List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowercase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
lowercase__ = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='''pt''' )
lowercase__ = targets['''input_ids''']
lowercase__ = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
| 43 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22PriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always
        # return 0 - set clip_std to 1 so it won't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape) )
        return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    """simple docstring"""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )
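# Example (hypothetical): solving for the missing quantity with R = 3 and X = 4 gives the
# classic 3-4-5 triangle:
#   electrical_impedance(3, 4, 0)  # -> {'impedance': 5.0}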
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 44 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
        mem = Rectangle(height=0.5 , width=0.5)
        fill = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
        cpu_targs = []
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
            cpu_target = Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[Any] = """roberta-prelayernorm"""
def __init__( self :Any , lowerCamelCase__ :List[Any]=5_02_65 , lowerCamelCase__ :Union[str, Any]=7_68 , lowerCamelCase__ :List[str]=12 , lowerCamelCase__ :List[Any]=12 , lowerCamelCase__ :str=30_72 , lowerCamelCase__ :Dict="gelu" , lowerCamelCase__ :Union[str, Any]=0.1 , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :Optional[Any]=5_12 , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :Any=0.02 , lowerCamelCase__ :str=1e-12 , lowerCamelCase__ :Union[str, Any]=1 , lowerCamelCase__ :str=0 , lowerCamelCase__ :str=2 , lowerCamelCase__ :str="absolute" , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :str=None , **lowerCamelCase__ :Any , ):
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = vocab_size
UpperCamelCase__ :int = hidden_size
UpperCamelCase__ :Any = num_hidden_layers
UpperCamelCase__ :Optional[Any] = num_attention_heads
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :str = intermediate_size
UpperCamelCase__ :List[Any] = hidden_dropout_prob
UpperCamelCase__ :Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ :str = max_position_embeddings
UpperCamelCase__ :Optional[int] = type_vocab_size
UpperCamelCase__ :Any = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :Tuple = position_embedding_type
UpperCamelCase__ :int = use_cache
UpperCamelCase__ :List[Any] = classifier_dropout
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
@property
def __a ( self :List[Any] ):
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCamelCase__ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
        ] )
| 45 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
def _lowercase( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=lowercase__ , metadata={'help': 'help message'} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
            xx = {k: v for k, v in vars(x).items() if k != 'container'}
            yy = {k: v for k, v in vars(y).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices' , None) and yy.get('choices' , None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](expected_choice) , yy['type'](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example , ) = parser.parse_args_into_dataclasses(args , look_for_args_file=False)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
            a__ =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
| 20 | 0 |
"""simple docstring"""
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
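# Each checkpoint URL embeds the file's SHA-256 digest as the second-to-last path segment;
# _download() below re-derives it via url.split("/")[-2] and verifies the downloaded bytes.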
def remove_ignore_keys_(state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict ) -> Any:
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
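# e.g. rename_keys maps "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight"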
def make_linear_from_emb(emb ) -> nn.Linear:
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url , root=os.path.join(os.path.expanduser("~" ) , ".cache" , "whisper" ) ) -> bytes:
    '''simple docstring'''
    # NOTE: the default cache root is an assumption; the original call site passes no root.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        # _download returns the raw checkpoint bytes; deserializing them here assumes
        # `import io` is present alongside the other imports at the top of this file.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # the original line passed dimensions["n_text_state"] (the hidden size) here;
        # "n_text_head" is the dims entry that actually holds the head count
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowerCAmelCase : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path) | 46 |
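# Invocation sketch for the converter above (script name and paths are illustrative):
#   python convert_openai_to_hf.py --checkpoint_path medium.en --pytorch_dump_folder_path ./whisper-medium-en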
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        num_time_features=0,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        label_length=10,
        moving_average=25,
        autocorrelation_factor=3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
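# Sizing note for the config above: per time step the model consumes
# input_size * len(lags_sequence) lagged target values plus _number_of_features
# covariates (static feature embeddings, dynamic/real features, time features, and
# the two scaling statistics log1p(abs(loc)) and log(scale)).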
| 20 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']
    def setup(self):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
    def encode(self, text, labels):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'''This example is {label}''' for label in labels], return_tensors='pt', padding='max_length', )
    def decode(self, outputs):
        '''simple docstring'''
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
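# Usage sketch (inputs are illustrative): every candidate label is scored as the NLI
# hypothesis "This example is {label}" against the text, and the label with the
# largest entailment logit is returned.
#   tool = TextClassificationTool()
#   tool('The new GPU doubles training throughput.', labels=['technology', 'cooking'])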
| 47 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
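# Post-processing sketch (shapes are illustrative): given logits of shape
# (batch, num_labels, height, width) and target_sizes such as [(512, 683)], each map
# is bilinearly resized to its target size before the per-pixel argmax over classes.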
| 20 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        """simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        """simple docstring"""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        """simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        """simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 48 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class _PatchedModuleObj:
    def __init__(self, module, attrs=None) -> None:
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    _active_patches = []
    def __init__(self, obj, target, new, attrs=None) -> None:
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self) -> None:
        *submodules, target_attr = self.target.split('.')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
    def __exit__(self, *exc_info) -> None:
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))
    def start(self) -> None:
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self) -> None:
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
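# Usage sketch (module and attribute are illustrative):
#   with patch_submodule(some_module, 'os.path.join', mock_join):
#       ...  # code going through some_module now sees the patched os.path.join
# Matching is done on object identity rather than on the global's name, so renamed
# imports such as "from os.path import join as pjoin" are patched as well.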
| 20 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
def __len__( self : Optional[int] ):
return self.config.num_train_timesteps
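# One reverse-SDE step in sketch form (tensor shapes are illustrative):
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x, x_mean = scheduler.step_pred(score, x, t)  # x includes fresh noise; x_mean is the drift-only update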
| 49 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 0 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("""number must not be negative""")
    return number & (number - 1) == 0
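# Why the bit trick works: a positive power of two has exactly one set bit, so
# subtracting 1 flips that bit and sets all lower bits, and ANDing the two gives 0.
#   8 -> 0b1000, 7 -> 0b0111, 8 & 7 == 0        (power of two)
#   6 -> 0b0110, 5 -> 0b0101, 6 & 5 == 0b0100   (not a power of two)
# Caveat: 0 & -1 == 0 as well, so this implementation reports 0 as a power of two.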
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
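# Example (path is illustrative): a resolved file such as
#   ~/.cache/huggingface/diffusers/models--org--name/snapshots/0123abcd.../model.bin
# yields "0123abcd...", returned only if it matches REGEX_COMMIT_HASH.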
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
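# Example: _add_variant('diffusion_pytorch_model.bin', 'fp16')
#   -> 'diffusion_pytorch_model.fp16.bin'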
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download,
    local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""", FutureWarning, )
return model_file
except: # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.""", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''').to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 51 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
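
# Illustrative invocation (a sketch: the script filename is an assumption, and
# 'levit-256' is one of the supported keys in names_to_config above):
#
#   python convert_levit_timm_to_pytorch.py \
#       --model_name levit-256 \
#       --pytorch_dump_folder_path levit-dump-folder/ \
#       --push_to_hub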
| 20 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"
    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 52 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
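
# Assumed input format for read_txt_into_dict (an illustration, not from the
# original script): the first whitespace-separated token on each non-empty
# line becomes a label, keyed by its 0-based line number, e.g. a file with
# the lines 'happy' and 'sad' yields {0: 'happy', 1: 'sad'}.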
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
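
# Illustrative invocations (sketches: the script filename and all file paths
# are placeholders, not taken from this file):
#
#   # fine-tuned CTC checkpoint
#   python convert_wav2vec2.py --checkpoint_path wav2vec_small_960h.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h --dict_path ./dict.ltr.txt
#
#   # pretraining-only checkpoint
#   python convert_wav2vec2.py --checkpoint_path wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned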
| 20 | 0 |
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
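
# Worked mini-example of the bottom-up pass above (illustrative; the script
# itself reads its numbers from 'triangle.txt'):
#
#     1            1
#     2 3    ->    3 4        -> best path sum = max(7, 9, 10) = 10
#     4 5 6        7 9 10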
if __name__ == "__main__":
print(solution())
| 53 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
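
# Editorial note (not from the original test): `loss` is the mean
# cross-entropy per target token, so -(labels.shape[-1] * loss.item())
# recovers the negative total log-likelihood of the target sequence,
# which is the convention the EXPECTED_SCORE constant follows.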
| 20 | 0 |
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
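
# Quick sanity check (illustrative): a 3-4-5 right triangle has area 6, so
# area_triangle_three_sides(3, 4, 5) == area_triangle(3, 4) == 6.0.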
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print("""\nSurface Areas of various geometric shapes: \n""")
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
    print(f"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
| 54 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
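
# Illustrative end-to-end sketch of the processor under test (names mirror
# setUp above; `pil_image` is a placeholder PIL image):
#
#   processor = BlipProcessor(BlipImageProcessor(),
#                             BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel'))
#   batch = processor(text='lower newer', images=pil_image)
#   # batch keys: 'pixel_values', 'input_ids', 'attention_mask'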
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 |
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
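
# Spot checks (illustrative):
#
# >>> exchange_sort([5, 4, 3, 2, 1])
# [1, 2, 3, 4, 5]
# >>> exchange_sort([-91, 0, 7])
# [-91, 0, 7]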
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead',
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
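
# Illustrative usage (a sketch: the checkpoint id and the input images are
# placeholders, not part of this file):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained('google/ddpm-ema-celebahq-256')
#   pipe = RePaintPipeline.from_pretrained('google/ddpm-ema-celebahq-256', scheduler=scheduler)
#   out = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#   out.images[0].save('inpainted.png')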
| 56 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
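
# Illustrative AutoBackbone usage mirroring the equivalence test above
# (checkpoint names come from that test; `pixel_values` is a placeholder
# tensor of shape (batch, 3, H, W)):
#
#   backbone = AutoBackbone.from_pretrained('microsoft/resnet-18', out_indices=[1, 2, 3])
#   feature_maps = backbone(pixel_values).feature_maps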
| 20 | 0 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
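
# Spot checks (illustrative values):
#
# >>> round(resistor_parallel([3.21389, 2, 3]), 4)
# 0.8738
# >>> resistor_series([3.21389, 2, 3])
# 8.21389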
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 57 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks whether the given number is prime."""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    """Generate an endless sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
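
# Spot check (illustrative): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) == 17.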
if __name__ == "__main__":
print(F'''{solution() = }''')
| 58 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 20 | 0 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ", )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
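
# Worked example (illustrative): entering '5 6 9 * +' evaluates as
# 5 + (6 * 9) = 59; the table printed by solve() traces each push/pop.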
| 59 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
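
# Illustrative check (not from the original file): in 'abracadabra' the
# pattern 'abr' occurs at indices 0 and 7, so
# find_pattern('abr', 'abracadabra') returns 2.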
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self, u_node, v_node, weight) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0

        minimum_weight_edge = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
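
# Minimal usage sketch (an illustrative 4-node graph, not part of the file):
#
#   g = Graph(4)
#   g.add_edge(0, 1, 1)
#   g.add_edge(1, 2, 2)
#   g.add_edge(2, 3, 3)
#   g.boruvka()   # prints each chosen edge and a total MST weight of 6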
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False ):
"""simple docstring"""
lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase__ = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
lowerCAmelCase__ = "cpu"
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ , torch_dtype=lowerCAmelCase_ ).to(lowerCAmelCase_ )
lowerCAmelCase__ = Path(lowerCAmelCase_ )
# TEXT ENCODER
lowerCAmelCase__ = pipeline.text_encoder.config.max_position_embeddings
lowerCAmelCase__ = pipeline.text_encoder.config.hidden_size
lowerCAmelCase__ = pipeline.tokenizer(
"A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="pt" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase_ , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
} , opset=lowerCAmelCase_ , )
del pipeline.text_encoder
# UNET
lowerCAmelCase__ = pipeline.unet.config.in_channels
lowerCAmelCase__ = pipeline.unet.config.sample_size
lowerCAmelCase__ = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
torch.randn(2 ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
torch.randn(2 , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
False,
) , output_path=lowerCAmelCase_ , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
} , opset=lowerCAmelCase_ , use_external_data_format=lowerCAmelCase_ , )
lowerCAmelCase__ = str(unet_path.absolute().as_posix() )
lowerCAmelCase__ = os.path.dirname(lowerCAmelCase_ )
lowerCAmelCase__ = onnx.load(lowerCAmelCase_ )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase_ )
os.mkdir(lowerCAmelCase_ )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase_ , lowerCAmelCase_ , save_as_external_data=lowerCAmelCase_ , all_tensors_to_one_file=lowerCAmelCase_ , location="weights.pb" , convert_attribute=lowerCAmelCase_ , )
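    # (collating everything into one weights.pb keeps the exported UNet loadable
    # despite ONNX's 2 GB protobuf size limit on a single model file)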
del pipeline.unet
# VAE ENCODER
lowerCAmelCase__ = pipeline.vae
lowerCAmelCase__ = vae_encoder.config.in_channels
lowerCAmelCase__ = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowerCAmelCase__ = lambda lowerCAmelCase_ , lowerCAmelCase_ : vae_encoder.encode(lowerCAmelCase_ , lowerCAmelCase_ )[0].sample()
onnx_export(
lowerCAmelCase_ , model_args=(
torch.randn(1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
False,
) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCAmelCase_ , )
# VAE DECODER
lowerCAmelCase__ = pipeline.vae
lowerCAmelCase__ = vae_decoder.config.latent_channels
lowerCAmelCase__ = vae_decoder.config.out_channels
# forward only through the decoder part
lowerCAmelCase__ = vae_encoder.decode
onnx_export(
lowerCAmelCase_ , model_args=(
torch.randn(1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCAmelCase_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCAmelCase__ = pipeline.safety_checker
lowerCAmelCase__ = safety_checker.config.vision_config.num_channels
lowerCAmelCase__ = safety_checker.config.vision_config.image_size
lowerCAmelCase__ = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
torch.randn(1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
} , opset=lowerCAmelCase_ , )
del pipeline.safety_checker
lowerCAmelCase__ = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
lowerCAmelCase__ = pipeline.feature_extractor
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowerCAmelCase_ )
print("ONNX pipeline saved to" , lowerCAmelCase_ )
del pipeline
del onnx_pipeline
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase_ , provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
UpperCamelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
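    # Example invocation (the script name and model id are placeholders for this
    # sketch; the flags match the parser defined above):
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path <hub-id-or-local-dir> --output_path ./sd_onnx --opset 14 --fp16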
| 61 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCAmelCase: Union[str, Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowerCAmelCase: Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=True , lowercase_=False) -> Any:
if rouge_types is None:
a__ =['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
a__ =rouge_scorer.RougeScorer(rouge_types=lowercase_ , use_stemmer=lowercase_)
if use_aggregator:
a__ =scoring.BootstrapAggregator()
else:
a__ =[]
for ref, pred in zip(lowercase_ , lowercase_):
a__ =scorer.score(lowercase_ , lowercase_)
if use_aggregator:
aggregator.add_scores(lowercase_)
else:
scores.append(lowercase_)
if use_aggregator:
a__ =aggregator.aggregate()
else:
a__ ={}
for key in scores[0]:
a__ =[score[key] for score in scores]
return result
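# With use_aggregator=True (the default) compute() returns one bootstrap-aggregated
# AggregateScore per rouge type, as in the docstring example; with
# use_aggregator=False it returns a list holding one Score tuple per
# (prediction, reference) pair instead. A minimal sketch:
#
#   results = rouge.compute(predictions=preds, references=refs, use_aggregator=False)
#   results["rouge1"]   # -> [Score(precision=..., recall=..., fmeasure=...), ...]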
| 20 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = UnCLIPImageVariationPipeline
UpperCamelCase_ : Optional[int] = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
UpperCamelCase_ : int = IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ : Optional[Any] = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
UpperCamelCase_ : Optional[int] = False
@property
def _A ( self : Optional[int] ):
return 32
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Dict ):
return self.time_input_dim
@property
def _A ( self : Tuple ):
return self.time_input_dim * 4
@property
def _A ( self : Tuple ):
return 100
@property
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _A ( self : List[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCAmelCase_ )
@property
def _A ( self : Any ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCAmelCase_ )
@property
def _A ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
SCREAMING_SNAKE_CASE : Optional[int] = UnCLIPTextProjModel(**UpperCAmelCase_ )
return model
@property
def _A ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def _A ( self : int ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _A ( self : List[str] ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
SCREAMING_SNAKE_CASE : str = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_decoder
SCREAMING_SNAKE_CASE : str = self.dummy_text_proj
SCREAMING_SNAKE_CASE : List[str] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : int = self.dummy_super_res_first
SCREAMING_SNAKE_CASE : List[str] = self.dummy_super_res_last
SCREAMING_SNAKE_CASE : Optional[Any] = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
SCREAMING_SNAKE_CASE : int = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _A ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[str]=True ):
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("mps" ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
if pil_image:
SCREAMING_SNAKE_CASE : Optional[int] = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : Tuple = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE : int = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[Any] = "cpu"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Dict = "cpu"
SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = "cpu"
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : str = self.pipeline_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
SCREAMING_SNAKE_CASE : Any = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
SCREAMING_SNAKE_CASE : Any = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : str = torch.device("cpu" )
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : Optional[int] = 1
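            # minimal stand-in scheduler: prepare_latents below only reads a unit
            # scale factor from it (in diffusers this attribute is init_noise_sigma)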
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.decoder.dtype
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
SCREAMING_SNAKE_CASE : List[Any] = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE : int = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
SCREAMING_SNAKE_CASE : List[str] = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler() )
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ ).images
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
# Don't pass image, instead pass embedding
SCREAMING_SNAKE_CASE : Tuple = pipeline_inputs.pop("image" )
SCREAMING_SNAKE_CASE : Any = pipe.image_encoder(UpperCAmelCase_ ).image_embeds
SCREAMING_SNAKE_CASE : int = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ , image_embeddings=UpperCAmelCase_ , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[int] = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
SCREAMING_SNAKE_CASE : Tuple = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCAmelCase_ , expected_max_diff=UpperCAmelCase_ )
@skip_mps
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = torch_device == "cpu"
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : str = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : List[str] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
SCREAMING_SNAKE_CASE : Optional[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCAmelCase_ )
@skip_mps
def _A ( self : Optional[Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _A ( self : int ):
return super().test_save_load_local()
@skip_mps
def _A ( self : Tuple ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
SCREAMING_SNAKE_CASE : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
SCREAMING_SNAKE_CASE : Union[str, Any] = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="np" , )
SCREAMING_SNAKE_CASE : str = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ , 15 )
| 62 |
from __future__ import annotations
_lowerCAmelCase: str = '#'
class lowercase_ :
def __init__( self) -> None:
a__ ={}
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =self._trie
for char in text:
if char not in trie:
a__ ={}
a__ =trie[char]
a__ =True
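        # the final assignment flags the last visited node with the END sentinel
        # (value True), marking where a stored word terminates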
def __UpperCamelCase ( self , lowercase_) -> tuple | list:
a__ =self._trie
for char in prefix:
if char in trie:
a__ =trie[char]
else:
return []
return self._elements(lowercase_)
def __UpperCamelCase ( self , lowercase_) -> tuple:
a__ =[]
for c, v in d.items():
a__ =[' '] if c == END else [(c + s) for s in self._elements(lowercase_)]
result.extend(lowercase_)
return tuple(lowercase_)
_lowerCAmelCase: Optional[int] = Trie()
_lowerCAmelCase: List[str] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _lowercase( __a : str ):
a__ =trie.find_word(__a )
return tuple(string + word for word in suffixes )
def _lowercase( ):
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
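    # For the word list above, the 'de' prefix matches four entries, so main()
    # prints roughly ('depart ', 'detergent ', 'deer ', 'deal '): each result
    # carries the trailing space appended at the END marker, and ordering
    # follows dict insertion order.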
| 20 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
__UpperCAmelCase : Tuple = quote(__lowerCamelCase )
return hfh.hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" , revision=__lowerCamelCase )
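# A hedged usage sketch (repo id and file path are placeholders):
#
#   url = hf_hub_url("my-user/my-dataset", "data/train.csv")
#   # -> roughly https://huggingface.co/datasets/my-user/my-dataset/resolve/main/data/train.csv
#
# The quote() branch only runs on huggingface_hub < 0.11.0, whose hf_hub_url did
# not url-encode the file path itself.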
| 63 |
_lowerCAmelCase: List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _lowercase( ):
a__ =input('Enter message: ' )
a__ =input('Enter key [alphanumeric]: ' )
a__ =input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
a__ ='encrypt'
a__ =encrypt_message(__a , __a )
elif mode.lower().startswith('d' ):
a__ ='decrypt'
a__ =decrypt_message(__a , __a )
print(f"""\n{mode.title()}ed message:""" )
print(__a )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'encrypt' )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'decrypt' )
def _lowercase( __a : str , __a : str , __a : str ):
a__ =[]
a__ =0
a__ =key.upper()
for symbol in message:
a__ =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__a )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__a ):
a__ =0
else:
translated.append(__a )
return "".join(__a )
if __name__ == "__main__":
main()
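    # Worked example of the shift arithmetic in translate_message: encrypting
    # "HELLO" with key "KEY" adds the key letters' indices modulo 26:
    #   H(7)+K(10)=17 -> R,  E(4)+E(4)=8 -> I,  L(11)+Y(24)=35%26=9 -> J,
    #   L(11)+K(10)=21 -> V, O(14)+E(4)=18 -> S
    # giving "RIJVS"; decrypting "RIJVS" with "KEY" recovers "HELLO".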
| 20 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowercase_ : List[str] = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
__a = ["pixel_values"]
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , **lowerCAmelCase , ) -> None:
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= size if size is not None else {'''shortest_edge''': 224}
SCREAMING_SNAKE_CASE__: Any= get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
SCREAMING_SNAKE_CASE__: Tuple= get_size_dict(lowerCAmelCase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__: List[str]= do_resize
SCREAMING_SNAKE_CASE__: List[Any]= size
SCREAMING_SNAKE_CASE__: Optional[int]= resample
SCREAMING_SNAKE_CASE__: Any= do_rescale
SCREAMING_SNAKE_CASE__: str= rescale_factor
SCREAMING_SNAKE_CASE__: str= do_center_crop
SCREAMING_SNAKE_CASE__: int= crop_size
SCREAMING_SNAKE_CASE__: List[str]= do_flip_channel_order
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PIL.Image.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__: Any= get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE__: Dict= get_resize_output_image_size(lowerCAmelCase , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__: Optional[Any]= get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase , **lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ) -> List[Any]:
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> np.ndarray:
return flip_channel_order(lowerCAmelCase , data_format=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ) -> PIL.Image.Image:
SCREAMING_SNAKE_CASE__: int= do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__: List[str]= resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__: List[str]= do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__: Optional[int]= rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__: str= do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__: Optional[Any]= (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE__: List[Any]= size if size is not None else self.size
SCREAMING_SNAKE_CASE__: int= get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__: Optional[Any]= get_size_dict(lowerCAmelCase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE__: Tuple= make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__: Dict= [to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__: Any= [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__: Dict= [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__: Dict= [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE__: Any= [self.flip_channel_order(image=lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__: int= [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__: Dict= {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Any= target_sizes.numpy()
SCREAMING_SNAKE_CASE__: Any= []
for idx in range(len(lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE__: str= torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: List[Any]= logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__: List[Any]= [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
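# A hedged usage sketch of the semantic-segmentation post-processing above (the
# model side is not in this file, so model/processor names are illustrative):
#
#   inputs = image_processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   seg_maps = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]   # PIL size is (W, H); targets are (H, W)
#   )   # one (H, W) label map per image, upsampled when target_sizes is given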
| 64 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class __lowercase ( __lowerCamelCase ):
snake_case_ = """lilt"""
def __init__( self : Union[str, Any] ,A : Any=30_522 ,A : List[Any]=768 ,A : Optional[Any]=12 ,A : Any=12 ,A : str=3_072 ,A : Any="gelu" ,A : Optional[int]=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=512 ,A : Optional[int]=2 ,A : List[str]=0.0_2 ,A : str=1e-12 ,A : Union[str, Any]=0 ,A : Optional[int]="absolute" ,A : Optional[Any]=None ,A : int=4 ,A : str=1_024 ,**A : int ,):
'''simple docstring'''
super().__init__(pad_token_id=A ,**A )
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : List[str] = layer_norm_eps
UpperCAmelCase__ : Tuple = position_embedding_type
UpperCAmelCase__ : List[str] = classifier_dropout
UpperCAmelCase__ : Optional[int] = channel_shrink_ratio
UpperCAmelCase__ : str = max_ad_position_embeddings
| 65 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20 | 0 |
from __future__ import annotations
from math import pi, sqrt
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
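    # Worked example of f = 1 / (2 * pi * sqrt(L * C)) as computed above: with
    # L = 10 H and C = 5 F, sqrt(50) ~= 7.071, so the function returns
    # ('Resonant frequency', ~0.02251), i.e. about 0.0225 Hz.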
| 66 |
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase: Any = sys.version_info >= (3, 10)
def _lowercase( __a : int=None , __a : Any=None ):
return field(default_factory=lambda: default , metadata=__a )
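# (the helper above, used below as `list_field`, wraps its default in a
# default_factory lambda because dataclass fields may not take mutable objects
# such as lists as plain defaults)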
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
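# e.g. (illustrative) passing `--no_baz` on the command line yields baz=False,
# while a bare `--baz` (via nargs='?' with const=True) yields baz=True, as the
# parse_args calls below assert.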
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
a__ =parser.parse_json_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
| 20 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Any=2.0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1e-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=8 , ) -> List[Any]:
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =image_size
__UpperCAmelCase =patch_size
__UpperCAmelCase =num_channels
__UpperCAmelCase =embed_dim
__UpperCAmelCase =depths
__UpperCAmelCase =num_heads
__UpperCAmelCase =window_size
__UpperCAmelCase =mlp_ratio
__UpperCAmelCase =qkv_bias
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =hidden_act
__UpperCAmelCase =use_absolute_embeddings
__UpperCAmelCase =patch_norm
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =initializer_range
__UpperCAmelCase =is_training
__UpperCAmelCase =scope
__UpperCAmelCase =use_labels
__UpperCAmelCase =type_sequence_label_size
__UpperCAmelCase =encoder_stride
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase =None
if self.use_labels:
__UpperCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Optional[Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__UpperCAmelCase =SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
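# (illustrative note) each of the len(depths) - 1 patch-merging stages halves H
# and W, so the token count shrinks 4x per merge while the channel dim doubles;
# that is what the two formulas below compute.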
__UpperCAmelCase =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
__UpperCAmelCase =SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase =1
__UpperCAmelCase =SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__UpperCAmelCase =self.type_sequence_label_size
__UpperCAmelCase =SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__UpperCAmelCase =model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : List[str] ) -> Tuple:
__UpperCAmelCase =self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs
__UpperCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Dict = False
lowerCamelCase : Tuple = False
lowerCamelCase : List[str] = False
lowerCamelCase : Tuple = False
def _a ( self : str ) -> str:
__UpperCAmelCase =SwinvaModelTester(self )
__UpperCAmelCase =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37 )
def _a ( self : List[Any] ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> str:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def _a ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def _a ( self : Optional[Any] ) -> int:
pass
def _a ( self : Tuple ) -> int:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _a ( self : str ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
__UpperCAmelCase =len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =config.window_size**2
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase =len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
__UpperCAmelCase =self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase =2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =outputs.hidden_states
__UpperCAmelCase =getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase =outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =reshaped_hidden_states[0].shape
__UpperCAmelCase =(
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : str ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =3
__UpperCAmelCase =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
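# (illustrative note) this rounds each side up past the next multiple of the
# patch size; when the size is already an exact multiple, a full extra patch of
# padding is still added (x + p - (x % p) == x + p in that case).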
for model_class in self.all_model_classes:
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase =True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def _a ( self : Optional[int] ) -> Tuple:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Dict:
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : int ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase =SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =_config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ) -> Dict:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def _a ( self : int ) -> Optional[int]:
__UpperCAmelCase =SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =self.default_image_processor
__UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase =image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__UpperCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 68 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
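# Usage sketch (illustrative arithmetic, not from the original file): with the
# defaults above, input_size == 1 and len(lags_sequence) == 7, while
# _number_of_features == 2 (only the input_size * 2 term for the log1p(abs(loc))
# and log(scale) features survives), so feature_size works out to 1 * 7 + 2 == 9.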
| 20 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Dict = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """lilt"""
def __init__( self : List[Any] , a_ : List[Any]=30_522 , a_ : Optional[int]=768 , a_ : Optional[Any]=12 , a_ : Union[str, Any]=12 , a_ : Optional[int]=3_072 , a_ : Dict="gelu" , a_ : Union[str, Any]=0.1 , a_ : str=0.1 , a_ : Optional[int]=512 , a_ : Tuple=2 , a_ : Dict=0.02 , a_ : Tuple=1e-12 , a_ : str=0 , a_ : Union[str, Any]="absolute" , a_ : Dict=None , a_ : List[str]=4 , a_ : Optional[Any]=1_024 , **a_ : str , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = classifier_dropout
__snake_case = channel_shrink_ratio
__snake_case = max_ad_position_embeddings
| 69 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
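# Usage sketch (illustrative; variable names assumed, method name as upstream):
#
#     maps = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[(512, 512)])
#     # maps[0] is a (512, 512) tensor of per-pixel class ids
#
# Without target_sizes, the per-image argmax over the class dim is returned
# directly, split into a list with one map per batch element.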
| 20 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCamelCase : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Any=None , lowercase : Optional[int]=None , lowercase : Optional[int]=None ):
'''simple docstring'''
lowerCamelCase_ = True
while ask_again:
lowerCamelCase_ = input(lowercase )
try:
if default is not None and len(lowercase ) == 0:
return default
return convert_value(lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[str]=[] , lowercase : Dict=None , lowercase : int=0 ):
'''simple docstring'''
lowerCamelCase_ = BulletMenu(lowercase , lowercase )
lowerCamelCase_ = menu.run(default_choice=lowercase )
return convert_value(lowercase ) if convert_value is not None else result
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ = int(lowercase )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def _SCREAMING_SNAKE_CASE ( lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = int(lowercase )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def _SCREAMING_SNAKE_CASE ( lowercase : Dict ):
'''simple docstring'''
lowerCamelCase_ = int(lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] ):
'''simple docstring'''
lowerCamelCase_ = int(lowercase )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = int(lowercase )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class A( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def a__ ( self : List[str] , A_ : Tuple , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = super()._format_usage(A_ , A_ , A_ , A_ )
lowerCamelCase_ = usage.replace('<command> [<args>] ' , '' )
return usage
| 70 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
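# Usage sketch (illustrative; the patcher class above corresponds to
# `patch_submodule` upstream, and `mymodule` is hypothetical):
#
#     import mymodule
#     with patch_submodule(mymodule, 'os.path.join', lambda *p: '/patched'):
#         assert mymodule.os.path.join('a', 'b') == '/patched'
#     # on exit, the original os.path.join is restored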
| 20 | 0 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
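# e.g. perfect(6) -> True (1 + 2 + 3 == 6), perfect(12) -> False
# (1 + 2 + 3 + 4 + 6 == 16); the divisor scan is O(n) in the input.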
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
_lowerCamelCase = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 71 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowercase( __a : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 72 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
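# e.g. (illustrative, exact versions assumed) a typical resulting user-agent:
#   "diffusers/0.17.0; python/3.10.12; session_id/<hex>; torch/2.0.1"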
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
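# e.g. (illustrative) a resolved path like ".../snapshots/<40-hex-hash>/config.json"
# yields the "<40-hex-hash>" part; None is returned when the captured group does
# not look like a full commit hash.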
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
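# e.g. (illustrative, using the upstream name `_add_variant` as called below)
# _add_variant('diffusion_pytorch_model.bin', 'fp16') returns
# 'diffusion_pytorch_model.fp16.bin': the variant slots in before the file
# extension, and with variant=None the name is returned unchanged.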
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[int] = inspect.getfile(accelerate.test_utils )
_lowercase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
_lowercase : int = ['''accelerate''', '''launch''']
_lowercase : int = Path.home() / '''.cache/huggingface/accelerate'''
_lowercase : int = '''default_config.yaml'''
_lowercase : List[Any] = config_folder / config_file
_lowercase : str = config_folder / '''_default_config.yaml'''
_lowercase : str = Path('''tests/test_configs''' )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[str]:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> str:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
for config in sorted(self.test_config_path.glob('**/*.yaml')):
with self.subTest(config_file=a):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(a), self.test_file_path] , env=os.environ.copy())
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[Any] = '''test-tpu'''
_lowercase : Optional[Any] = '''us-central1-a'''
_lowercase : List[str] = '''ls'''
_lowercase : Union[str, Any] = ['''accelerate''', '''tpu-config''']
_lowercase : Tuple = '''cd /usr/share'''
_lowercase : Optional[Any] = '''tests/test_samples/test_command_file.sh'''
_lowercase : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=a)
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , a , )
| 73 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
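    # The copy below zips the two key lists purely by position: entry i of the timm
    # state dict lands on entry i of the HF state dict, so the conversion assumes both
    # models register their modules in exactly the same order.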
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 0 |
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase_ = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowercase_ = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = get_test_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : int = get_test_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''BertModelTest''': '''BertModelTester'''}
__SCREAMING_SNAKE_CASE : List[str] = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = get_model_to_test_mapping(_A )
__SCREAMING_SNAKE_CASE : Dict = get_model_to_test_mapping(_A )
__SCREAMING_SNAKE_CASE : Dict = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = get_model_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_model_to_tester_mapping(_A )
__SCREAMING_SNAKE_CASE : int = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
| 74 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCAmelCase: int = logging.get_logger(__name__)
_lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_lowerCAmelCase: Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase( __a : Optional[Any] ):
a__ ={}
with open(__a , 'r' ) as file:
for line_number, line in enumerate(__a ):
a__ =line.strip()
if line:
a__ =line.split()
a__ =line_number
a__ =words[0]
a__ =value
return result
def _lowercase( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : int , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] ):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
_lowerCAmelCase: Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def _lowercase( __a : Union[str, Any] , __a : List[str] , __a : Dict ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _lowercase( __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] ):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
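    # fairseq stores conv blocks as "conv_layers.<layer_id>.<type_id>...": type_id 0 is
    # the convolution itself and type_id 2 is its normalization, which every layer has
    # under layer norm but only layer 0 has under group norm (hence the branches below).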
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def _lowercase( __a : str , __a : str , __a : Any=None , __a : str=None , __a : Any=True , __a : Union[str, Any]=False ):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_lowerCAmelCase: Tuple = parser.parse_args()
_lowerCAmelCase: Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
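
# Example invocation of this converter (the script name and every path below are
# placeholders, not values taken from the repository):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned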
| 20 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3  # the type hints below reference sqlite3.Connection
import sqlalchemy
class lowerCamelCase_ ( __a ):
def __init__( self : Optional[int] , _A : Union[str, "sqlalchemy.sql.Selectable"] , _A : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _A : Optional[Features] = None , _A : str = None , _A : bool = False , **_A : Dict , ):
'''simple docstring'''
super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A )
UpperCAmelCase__ : int = Sql(
cache_dir=_A , features=_A , sql=_A , con=_A , **_A , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : List[str] = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , )
# Build dataset for splits
UpperCAmelCase__ : Optional[int] = self.builder.as_dataset(
split='''train''' , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase_ :
def __init__( self : Any , _A : Dataset , _A : str , _A : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : Tuple , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
UpperCAmelCase__ : Any = dataset
UpperCAmelCase__ : int = name
UpperCAmelCase__ : Union[str, Any] = con
UpperCAmelCase__ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase__ : int = num_proc
UpperCAmelCase__ : Optional[int] = to_sql_kwargs
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self.to_sql_kwargs.pop('''sql''' , _A )
UpperCAmelCase__ : int = self.to_sql_kwargs.pop('''con''' , _A )
UpperCAmelCase__ : List[Any] = self.to_sql_kwargs.pop('''index''' , _A )
UpperCAmelCase__ : Optional[int] = self._write(index=_A , **self.to_sql_kwargs )
return written
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = args
UpperCAmelCase__ : int = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
UpperCAmelCase__ : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCAmelCase__ : Tuple = batch.to_pandas()
UpperCAmelCase__ : Tuple = df.to_sql(self.name , self.con , index=_A , **_A )
return num_rows or len(_A )
def lowercase_ ( self : Optional[Any] , _A : Optional[int] , **_A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
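
# A minimal usage sketch for the reader/writer above. The two classes share an
# obfuscated name in this file, so they are shown under the hypothetical names
# SqlDatasetReader / SqlDatasetWriter, with read()/write() standing in for their
# public methods:
#
#     import sqlite3
#     from datasets import Dataset
#
#     con = sqlite3.connect(":memory:")
#     ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#     SqlDatasetWriter(ds, "demo", con).write()                        # Dataset -> table
#     roundtrip = SqlDatasetReader("SELECT * FROM demo", con).read()   # table -> Dataset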
| 75 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
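        # `loss` is the mean per-token cross-entropy, so scaling it by the label length
        # and negating recovers the sequence log-likelihood the expected score encodes.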
a__ =-(labels.shape[-1] * loss.item())
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 0 |
"""simple docstring"""
import math
def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
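
# Worked example of the recursion cap computed in sort(): for n = 100 elements,
# max_depth = 2 * ceil(log2(100)) = 2 * 7 = 14, so after 14 levels of partitioning the
# algorithm falls back to heap sort, and any slice shorter than size_threshold = 16
# elements is finished with insertion sort.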
| 76 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, if any."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return it on success, else None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 77 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by exchanging every out-of-order pair."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
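
# Worked example: exchange_sort([3, 1, 2]) swaps 3 with 1 on the first pass (i=0, j=1)
# and 3 with 2 on the second (i=1, j=2), returning [1, 2, 3]. The nested loops always
# make n * (n - 1) / 2 comparisons, so the running time is quadratic on every input.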
| 20 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
UpperCAmelCase_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
UpperCAmelCase_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
UpperCAmelCase_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
UpperCAmelCase_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
UpperCAmelCase_ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
UpperCAmelCase_ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
UpperCAmelCase_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
UpperCAmelCase_ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
UpperCAmelCase_ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
UpperCAmelCase_ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
UpperCAmelCase_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
UpperCAmelCase_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
UpperCAmelCase_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
UpperCAmelCase_ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
UpperCAmelCase_ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
UpperCAmelCase_ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
UpperCAmelCase_ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
UpperCAmelCase_ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
UpperCAmelCase_ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
UpperCAmelCase_ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
UpperCAmelCase_ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
UpperCAmelCase_ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
UpperCAmelCase_ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : List[str] ) -> int:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ , UpperCAmelCase_ = int(key_split[2] ), int(key_split[4] )
UpperCAmelCase_ = config.vision_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ = int(key_split[3] )
UpperCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = rename_key(snake_case_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
UpperCAmelCase_ = val.squeeze_()
else:
UpperCAmelCase_ = val
return orig_state_dict
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Dict="groupvit-gcc-yfcc" , snake_case_ : Any=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = GroupViTConfig()
UpperCAmelCase_ = GroupViTModel(snake_case_ ).eval()
UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )["model"]
UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(snake_case_ ) == 0)
# verify result
UpperCAmelCase_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=snake_case_ , padding=snake_case_ , return_tensors="pt" )
with torch.no_grad():
UpperCAmelCase_ = model(**snake_case_ )
if model_name == "groupvit-gcc-yfcc":
UpperCAmelCase_ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
UpperCAmelCase_ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , snake_case_ , atol=1E-3 )
processor.save_pretrained(snake_case_ )
model.save_pretrained(snake_case_ )
print("Successfully saved processor and model to" , snake_case_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(snake_case_ , organization="nielsr" )
model.push_to_hub(snake_case_ , organization="nielsr" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
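
# The "qkv" branch in convert_state_dict above splits one fused projection matrix into
# separate q/k/v pieces. A minimal, self-contained illustration of that slicing (this
# helper is an explanatory sketch, not part of the conversion script):
def _illustrate_qkv_split(dim: int = 4) -> None:
    qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] projection
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : 2 * dim, :]
    v = qkv_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)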
| 78 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase__ : Optional[int] = """The dog is cute and lives in the garden house"""
UpperCAmelCase__ : List[Any] = jnp.array([tokenizer.encode(_lowerCAmelCase )] )
UpperCAmelCase__ : str = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : int = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
UpperCAmelCase__ : Optional[Any] = model(_lowerCAmelCase )["""last_hidden_state"""]
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1e-3 ) )
| 79 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCAmelCase: Optional[Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase: List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase: List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
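
# This is the standard transformers lazy-import pattern: under TYPE_CHECKING the names
# are imported eagerly for static analysis, while at runtime a _LazyModule resolves each
# attribute from _import_structure on first access (canonically the _LazyModule instance
# is assigned to sys.modules[__name__]).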
| 20 | 0 |
def cocktail_shaker_sort(unsorted):
    """Sort a list in place with alternating backward and forward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
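
# Each outer iteration makes a backward pass (bubbling the minimum leftward) and then a
# forward pass (bubbling the maximum rightward); as soon as a full iteration finishes
# without a swap the loop exits early, so a nearly sorted input like [2, 1, 3, 4, 5]
# costs one swap plus a single clean sweep.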
| 80 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
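
# A minimal usage sketch (in the transformers library the two classes above are
# BigBirdConfig and BigBirdOnnxConfig; the values shown are illustrative):
#
#     from transformers import BigBirdConfig
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#
# Block-sparse attention needs the sequence length to be large enough to hold the
# global, sliding and random blocks; when it is not, the model falls back to full
# attention and logs a warning.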
| 20 | 0 |