"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        # three strokes of ones; the first two become one run once the mask is
        # flattened row-major (end of row 0 flows into the start of row 1)
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
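# A self-contained sketch of the RLE convention that test_binary_mask_to_rle
# above asserts: the mask is flattened row-major and encoded as alternating
# (1-indexed start, run length) pairs, one pair per run of ones. This is an
# illustrative reimplementation for clarity, not the transformers function.
def _rle_sketch(mask):
    import numpy as np  # local import so the sketch stands alone

    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed change points
    runs[1::2] -= runs[::2]  # turn run-end positions into run lengths
    return list(runs)


if __name__ == "__main__":
    import numpy as np

    demo_mask = np.zeros((20, 50))
    demo_mask[0, 20:] = 1  # this run continues into the start of row 1...
    demo_mask[1, :15] = 1  # ...so together they form one run of 45 ones
    demo_mask[5, :] = 1
    demo_rle = _rle_sketch(demo_mask)
    assert demo_rle[0] == 21 and demo_rle[1] == 45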
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra parameters controlling weight pruning."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
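# Minimal usage sketch of the config above (values shown are the defaults;
# the exact semantics of "topK"/"constant" live in the accompanying
# masked-BERT modeling code, which is not reproduced here):
#
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#     assert config.model_type == "masked_bert"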
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
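# Sketch of how the three configs above compose (all values are the defaults):
#
#     text_config = Pix2StructTextConfig()
#     vision_config = Pix2StructVisionConfig()
#     config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.model_type == "pix2struct"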
class CircularQueue:
    """FIFO queue with a fixed capacity, backed by a list whose indices wrap around."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
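# Illustrative usage of the queue above; the commented values are what the
# implementation as written should print.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
    print(len(queue))       # 3
    print(queue.dequeue())  # "a" -- front advances and the slot is freed
    queue.enqueue("d")      # rear wraps around via modulo and reuses the freed slot
    print(queue.first())    # "b"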
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 0 |
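# Example invocation of the script above (the script filename and file paths
# here are illustrative; only the flags are defined by the parser above):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small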
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :int
UpperCAmelCase_ :TreeNode | None = None
UpperCAmelCase_ :TreeNode | None = None
__UpperCAmelCase = namedtuple('CoinsDistribResult', 'moves excess')
def _snake_case ( lowercase__ : TreeNode | None ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(lowercase__ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowercase__ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowercase__ ) != count_coins(lowercase__ ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(lowercase__ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = get_distrib(node.left )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_distrib(node.right )
lowerCAmelCase_ :Optional[Any] = 1 - left_distrib_excess
lowerCAmelCase_ :Tuple = 1 - right_distrib_excess
lowerCAmelCase_ :Optional[Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowercase__ )
+ abs(lowercase__ )
)
lowerCAmelCase_ :int = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowercase__ , lowercase__ )
return get_distrib(lowercase__ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
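# Worked example for distribute_coins above (the tree and expected answer are
# worked by hand): the root holds 3 coins and both leaves hold 0, so one coin
# must move to each leaf, i.e. 2 moves in total.
if __name__ == "__main__":
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2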
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Count the pairs (outer_width, hole_width) that describe a square lamina,
    i.e. a square outline with a centered square hole: both widths share the
    same parity, the border is at least one tile thick, and the lamina uses at
    most `limit` tiles (outer_width**2 - hole_width**2 <= limit).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
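# Brute-force cross-check of the closed-form counting above for small limits,
# enumerating laminae directly from the same definition (illustrative only).
def solution_brute_force(limit: int) -> int:
    count = 0
    for outer in range(3, limit):  # outer square side
        for hole in range(1, outer - 1):  # hole side, border at least one tile
            if (outer - hole) % 2 == 0 and outer * outer - hole * hole <= limit:
                count += 1
    return count


if __name__ == "__main__":
    assert solution_brute_force(100) == solution(100)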
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model quickly to catch simple problems; more models run as @slow below
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()

            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
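# Minimal round-trip sketch of the writer/reader pair exercised above; not a
# test, just illustrative (the column values and file name are made up).
def _example_parquet_roundtrip(tmp_path):
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ParquetDatasetWriter(dataset, tmp_path / "roundtrip.parquet").write()
    reloaded = ParquetDatasetReader(str(tmp_path / "roundtrip.parquet"), cache_dir=tmp_path / "cache").read()
    assert reloaded.column_names == dataset.column_names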
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.txt"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
lowerCamelCase__ = {
"""openbmb/cpm-ant-10b""": 1_024,
}
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = collections.OrderedDict()
with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as reader:
__lowerCAmelCase : Dict = reader.readlines()
for index, token in enumerate(_UpperCamelCase ):
__lowerCAmelCase : int = token.rstrip('\n' )
__lowerCAmelCase : str = index
return vocab
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE=2_00 ):
__lowerCAmelCase : Optional[Any] = vocab
__lowerCAmelCase : Optional[int] = unk_token
__lowerCAmelCase : List[Any] = max_input_chars_per_word
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = list(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > self.max_input_chars_per_word:
return [self.unk_token]
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : List[str] = []
while start < len(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = None
while start < end:
__lowerCAmelCase : List[str] = ''.join(chars[start:end] )
if substr in self.vocab:
__lowerCAmelCase : str = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = end
return sub_tokens
class A__ ( _lowerCamelCase):
A_ : Any = VOCAB_FILES_NAMES
A_ : Any = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[int] = ['input_ids', 'attention_mask']
A_ : int = False
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<d>" , _SCREAMING_SNAKE_CASE="</d>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="</n>" , _SCREAMING_SNAKE_CASE="</_>" , _SCREAMING_SNAKE_CASE="left" , **_SCREAMING_SNAKE_CASE , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=_SCREAMING_SNAKE_CASE , eod_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , line_token=_SCREAMING_SNAKE_CASE , space_token=_SCREAMING_SNAKE_CASE , padding_side=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[Any] = bod_token
__lowerCAmelCase : List[Any] = eod_token
__lowerCAmelCase : Dict = load_vocab(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = self.encoder[space_token]
__lowerCAmelCase : Union[str, Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__lowerCAmelCase : int = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) )
__lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase : List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __lowerCamelCase ( self ):
return self.encoder[self.bod_token]
@property
def __lowerCamelCase ( self ):
return self.encoder[self.eod_token]
@property
def __lowerCamelCase ( self ):
return self.encoder["\n"]
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = []
for x in jieba.cut(_SCREAMING_SNAKE_CASE , cut_all=_SCREAMING_SNAKE_CASE ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) )
return output_tokens
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = [i for i in token_ids if i >= 0]
__lowerCAmelCase : Tuple = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return token in self.encoder
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return "".join(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__lowerCAmelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
__lowerCAmelCase : int = 0
if " " in self.encoder:
__lowerCAmelCase : Optional[int] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
__lowerCAmelCase : Optional[int] = self.encoder['\n']
del self.encoder["\n"]
__lowerCAmelCase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
__lowerCAmelCase : Optional[int] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE ))
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) | 86 |
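# Tiny illustration of the greedy longest-match-first strategy implemented by
# WordpieceTokenizer.tokenize above, with a made-up vocabulary; the commented
# outputs follow from the loop as written.
if __name__ == "__main__":
    toy_vocab = {"foo": 0, "foobar": 1, "bar": 2, "<unk>": 3}
    toy_tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
    print(toy_tokenizer.tokenize("foobar"))  # ['foobar']  (the longest match wins)
    print(toy_tokenizer.tokenize("foobaz"))  # ['foo', '<unk>', '<unk>', '<unk>']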
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Current Price of the product",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
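# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the class above is what
# `transformers.pipeline("zero-shot-object-detection")` dispatches to. The
# checkpoint below is a real public model, but it must be downloaded, so the
# demo only runs when this module is executed directly.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    for prediction in predictions:
        # each entry looks like {"score": ..., "label": ..., "box": {"xmin": ..., ...}}
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])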
| 10 | 0 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """simple docstring"""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as possible
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 88 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ={
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.dummy_model()
lowerCamelCase__: List[Any] =self.dummy_sample_deter
lowerCamelCase__: int =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Any =pred_prev_sample
lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
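# Hedged note (the path below assumes the standard diffusers tests layout, which is
# not stated in this file): this suite is normally collected by pytest, e.g.
#   python -m pytest tests/schedulers/test_scheduler_ddpm_parallel.py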
| 10 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation

    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
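# Hedged usage sketch (the file names are placeholders): PipelineTool instances are
# callable, chaining encode -> forward -> decode as defined above; the checkpoint
# CIDAS/clipseg-rd64-refined is downloaded on first use.
if __name__ == "__main__":
    from PIL import Image as PILImage

    tool = ImageSegmentationTool()
    mask = tool(PILImage.open("photo.png"), label="a cat")
    mask.save("cat_mask.png")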
| 89 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
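# Hedged note: this is the classic 9-node example graph (as in CLRS); the expected
# MST edges above have total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.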
| 10 | 0 |
from math import pi, sqrt


def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
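# Worked examples: gamma(n) == (n - 1)! for positive integers, so gamma(5) == 24;
# half-integers reduce to gamma(0.5) == sqrt(pi), e.g. gamma(1.5) == 0.5 * sqrt(pi)
# which is approximately 0.8862.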
def test_gamma() -> None:
    """simple docstring"""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 90 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
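# Hedged note: with a transformers checkout this suite runs under pytest, e.g.
#   python -m pytest tests/models/bartpho/test_tokenization_bartpho.py
# (the path is assumed from the standard tests layout, not stated in this file).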
| 10 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    """simple docstring"""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """simple docstring"""
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """simple docstring"""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
UpperCAmelCase_ : str = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
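# Hedged invocation example (the script name and paths are placeholders; the flags
# are the ones defined by the argparse block above):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base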
| 91 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int:
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
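# Hedged invocation example (script name and paths are placeholders; the flags are
# the ones defined by the argparse block above):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --not_finetuned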
| 10 | 0 |
UpperCamelCase__ = """Input must be a string of 8 numbers plus letter"""
UpperCamelCase__ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def _a ( SCREAMING_SNAKE_CASE_ : str ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = F"""Expected string as input, found {type(SCREAMING_SNAKE_CASE_ ).__name__}"""
raise TypeError(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = spanish_id.replace("-" , "" ).upper()
if len(SCREAMING_SNAKE_CASE_ ) != 9:
raise ValueError(SCREAMING_SNAKE_CASE_ )
try:
__lowerCAmelCase = int(spanish_id_clean[0:8] )
__lowerCAmelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(SCREAMING_SNAKE_CASE_ ) from ex
if letter.isdigit():
raise ValueError(SCREAMING_SNAKE_CASE_ )
return letter == LOOKUP_LETTERS[number % 23]
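# Example: is_spain_national_id("12345678Z") is True, because 12345678 % 23 == 14
# and LOOKUP_LETTERS[14] == "Z"; "12345678A" returns False (wrong check letter).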
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
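# Hedged usage sketch with the classic two-state HMM from the Wikipedia Viterbi
# article (the main function above computes the most likely hidden-state path; its
# name is mangled in this dump):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   initial = {"Healthy": 0.6, "Fever": 0.4}
#   transition = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                 "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emission = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# Expected result: ["Healthy", "Healthy", "Fever"]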
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = ShapEImgaImgPipeline
lowerCAmelCase_ = ['''image''']
lowerCAmelCase_ = ['''image''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def _snake_case ( self ):
"""simple docstring"""
return 32
@property
def _snake_case ( self ):
"""simple docstring"""
return 32
@property
def _snake_case ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _snake_case ( self ):
"""simple docstring"""
return 8
@property
def _snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase_ : Tuple = CLIPVisionModel(__SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , do_resize=__SCREAMING_SNAKE_CASE , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , )
return image_processor
@property
def _snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : List[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase_ : Any = PriorTransformer(**__SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase_ : Optional[Any] = ShapERenderer(**__SCREAMING_SNAKE_CASE )
return model
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.dummy_prior
lowercase_ : Union[str, Any] = self.dummy_image_encoder
lowercase_ : Tuple = self.dummy_image_processor
lowercase_ : Optional[Any] = self.dummy_renderer
lowercase_ : Tuple = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__SCREAMING_SNAKE_CASE , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
lowercase_ : Dict = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
lowercase_ : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
lowercase_ : int = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : List[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = '''cpu'''
lowercase_ : List[str] = self.get_dummy_components()
lowercase_ : Optional[int] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[Any] = output.images[0]
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase_ : Optional[Any] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = torch_device == '''cpu'''
lowercase_ : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__SCREAMING_SNAKE_CASE , relax_max_difference=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.get_dummy_components()
lowercase_ : Optional[Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = 1
lowercase_ : Any = 2
lowercase_ : str = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
lowercase_ : Optional[int] = batch_size * [inputs[key]]
lowercase_ : Any = pipe(**__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase_ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase_ : Optional[Any] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase_ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
lowercase_ : Union[str, Any] = pipe(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 93 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
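# Hedged usage sketch via the public transformers API (the class name in this dump
# is mangled; upstream it is UniSpeechConfig):
#   from transformers import UniSpeechConfig, UniSpeechModel
#   configuration = UniSpeechConfig()
#   model = UniSpeechModel(configuration)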
| 10 | 0 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.'
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
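# Example: solve(13, 2) counts 4 + 9 (2**2 + 3**2) as the single way to write 13
# as a sum of distinct squares, so it returns 1.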
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find root of log(x) - 1 = 0 (the root is Euler's number e)
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
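# Design note (as written above): `func` is a string evaluated with eval against the
# local variable `x`, and its derivative comes from sympy.diff, so expressions must
# use `x` as the unknown (e.g. "x**2 - 5*x + 2").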
| 10 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """simple docstring"""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    output_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(output_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
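# Example: alternative_string_arrange("AB", "XYZ") == "AXBYZ" -- characters are
# interleaved pairwise and the leftover tail of the longer string is appended.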
| 95 |
import itertools
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
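# Examples: solution(6) == 13 (the sixth prime), and solution() == 104743, the
# well-known answer to Project Euler problem 7 (the 10001st prime).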
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""image_processor""", """tokenizer"""]
lowerCamelCase__ = """ViTImageProcessor"""
lowerCamelCase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , lowercase=None , lowercase=None , **lowercase ):
_lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase , )
_lowerCamelCase : Any = kwargs.pop('feature_extractor' )
_lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase , lowercase )
def __call__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_lowerCamelCase : Union[str, Any] = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if visual_prompt is not None:
_lowerCamelCase : Optional[int] = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if images is not None:
_lowerCamelCase : Tuple = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if visual_prompt is not None and images is not None:
_lowerCamelCase : Optional[Any] = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_lowerCamelCase : List[Any] = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase , )
return self.image_processor_class
@property
def A_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase , )
        return self.image_processor
| 96 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]))

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]))

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]))
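
# --- usage sketch (added for illustration) -----------------------------------
# What the tests above exercise, outside unittest: resize + center-crop to the
# 30x30 defaults from the tester. Parameter names mirror the dict above.
import numpy as np
from transformers import PoolFormerImageProcessor

image_processor = PoolFormerImageProcessor(
    size={"shortest_edge": 30}, crop_pct=0.9, crop_size={"height": 30, "width": 30}
)
batch = [np.random.randint(0, 256, (3, 64, 80), dtype=np.uint8) for _ in range(4)]
pixel_values = image_processor(batch, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([4, 3, 30, 30])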
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
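
# --- usage sketch (added for illustration) -----------------------------------
# Standalone equivalent of the module above: after install(), uncaught
# exceptions render as rich-formatted tracebacks (requires `pip install rich`).
from rich.traceback import install

install(show_locals=False)
print("tracebacks are now rendered by rich")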
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
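
# --- usage sketch (added for illustration) -----------------------------------
# Round-trip with the fast tokenizer above. The checkpoint comes from
# PRETRAINED_VOCAB_FILES_MAP; this assumes a transformers version that still
# ships RetriBert (note the deprecated-package relative imports above).
from transformers import RetriBertTokenizerFast

tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
enc = tok("hello world")
print(enc["input_ids"])
print(tok.decode(enc["input_ids"]))  # "[CLS] hello world [SEP]"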
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
def a_ ( ):
UpperCAmelCase__ = Node(1 )
UpperCAmelCase__ = Node(2 )
UpperCAmelCase__ = Node(3 )
UpperCAmelCase__ = Node(4 )
UpperCAmelCase__ = Node(5 )
return tree
def a_ ( lowerCamelCase ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a_ ( lowerCamelCase ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a_ ( lowerCamelCase ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a_ ( lowerCamelCase ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
if root is None:
return output
UpperCAmelCase__ = deque([root] )
while process_queue:
UpperCAmelCase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
def populate_output(lowerCamelCase , lowerCamelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase , lowerCamelCase )
return output
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
def populate_output(lowerCamelCase , lowerCamelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase , lowerCamelCase )
return output
def a_ ( lowerCamelCase ):
if root is None:
return []
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = height(lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = 0
return output
def a_ ( ): # Main function for testing.
UpperCAmelCase__ = make_tree()
print(f'''In-order Traversal: {inorder(lowerCamelCase )}''' )
print(f'''Pre-order Traversal: {preorder(lowerCamelCase )}''' )
print(f'''Post-order Traversal: {postorder(lowerCamelCase )}''' , '\n' )
print(f'''Height of Tree: {height(lowerCamelCase )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(lowerCamelCase ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(lowerCamelCase ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(lowerCamelCase , level=lowerCamelCase ) )
print('\nZigZag order Traversal: ' )
print(zigzag(lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
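
# --- worked example (added for illustration) ----------------------------------
# For the 5-node sample tree built by make_tree() above, the traversals are:
#   inorder      -> [4, 2, 5, 1, 3]
#   preorder     -> [1, 2, 4, 5, 3]
#   postorder    -> [4, 5, 2, 3, 1]
#   height       -> 3
#   level_order  -> [1, 2, 3, 4, 5]
#   zigzag       -> [[1], [3, 2], [4, 5]]
tree = make_tree()
assert inorder(tree) == [4, 2, 5, 1, 3]
assert zigzag(tree) == [[1], [3, 2], [4, 5]]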
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))
        outputs_cache = model.decode(decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids)

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate([decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]))], axis=-1)

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))

        outputs_cache = model.decode(decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids)

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
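
# --- worked example (added for illustration) ----------------------------------
# What test_shift_tokens_right above checks, on one concrete row. With
# pad_token_id=1 and decoder_start_token_id=2, the row is shifted right by one,
# the start token is prepended, and the trailing pad falls off the end:
#   [71, 82, 18, 33, 2, 1, 1]  ->  [2, 71, 82, 18, 33, 2, 1]
# Pure-numpy sketch of the same transformation, independent of flax:
import numpy as np

def shift_right(input_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # shift every row right by one position
    shifted[:, 0] = decoder_start_token_id
    return shifted

row = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_right(row, 2))  # [[ 2 71 82 18 33  2  1]]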
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
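
# --- worked example (added for illustration) ----------------------------------
# The checkpoint regex above in isolation, on the docstring snippet the comment
# mentions:
import re

pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
print(pattern.findall(doc))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]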
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
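
# --- usage sketch (added for illustration) -----------------------------------
# End-to-end use of the tokenizer above; downloads the sentencepiece model for
# the checkpoint named in PRETRAINED_VOCAB_FILES_MAP (requires sentencepiece).
from transformers import XLMProphetNetTokenizer

tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tok("Hello world").input_ids
print(ids)  # ends with the [SEP] id (2) appended by build_inputs_with_special_tokens
print(tok.convert_ids_to_tokens(ids))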
"""simple docstring"""
import numpy as np
def _lowerCAmelCase ( UpperCamelCase_ ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
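
# --- worked example (added for illustration) ----------------------------------
# sigmoid(0) = 0.5, and the function saturates toward 0 and 1 at the tails:
print(sigmoid(np.array([-1.0, 0.0, 1.0])))
# [0.26894142 0.5        0.73105858]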
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
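
# --- usage sketch (added for illustration) -----------------------------------
# Effect of the _LazyModule indirection above: the package object in sys.modules
# is replaced by a proxy, and submodules are only imported on first attribute
# access. (The printed type name is an assumption based on current transformers
# layout.)
import importlib

ibert = importlib.import_module("transformers.models.ibert")
print(type(ibert).__name__)     # _LazyModule
config_cls = ibert.IBertConfig  # this attribute access triggers the real import
print(config_cls.model_type)    # "ibert"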
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace: c = sqrt(K / rho), with bulk modulus K in Pa and
    density rho in kg/m^3; the result is in m/s.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
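
# --- worked example (added for illustration) ----------------------------------
# Water at roughly 20 °C (illustrative textbook values): bulk modulus ≈ 2.15 GPa,
# density ≈ 998 kg/m^3, giving about 1.47 km/s:
print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))  # ≈ 1467.7 m/s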
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='owlvit_text_model'
def __init__(self , a_=4_94_08 , a_=5_12 , a_=20_48 , a_=12 , a_=8 , a_=16 , a_="quick_gelu" , a_=1E-5 , a_=0.0 , a_=0.02 , a_=1.0 , a_=0 , a_=4_94_06 , a_=4_94_07 , **a_ , ):
'''simple docstring'''
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case : Any = vocab_size
__snake_case : Dict = hidden_size
__snake_case : Any = intermediate_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : Any = max_position_embeddings
__snake_case : Any = hidden_act
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = attention_dropout
__snake_case : Tuple = initializer_range
__snake_case : Tuple = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ):
'''simple docstring'''
cls._set_token_in_kwargs(a_ )
__snake_case , __snake_case : Optional[Any] = cls.get_config_dict(a_ , **a_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__snake_case : Any = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='owlvit_vision_model'
def __init__(self , a_=7_68 , a_=30_72 , a_=12 , a_=12 , a_=3 , a_=7_68 , a_=32 , a_="quick_gelu" , a_=1E-5 , a_=0.0 , a_=0.02 , a_=1.0 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : str = hidden_size
__snake_case : List[str] = intermediate_size
__snake_case : Dict = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : int = num_channels
__snake_case : Union[str, Any] = image_size
__snake_case : List[str] = patch_size
__snake_case : Dict = hidden_act
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : str = attention_dropout
__snake_case : List[str] = initializer_range
__snake_case : str = initializer_factor
@classmethod
def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ):
'''simple docstring'''
cls._set_token_in_kwargs(a_ )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(a_ , **a_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__snake_case : Dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='owlvit'
lowerCamelCase__ =True
def __init__(self , a_=None , a_=None , a_=5_12 , a_=2.6592 , a_=True , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
if text_config is None:
__snake_case : Union[str, Any] = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__snake_case : Optional[int] = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
__snake_case : str = OwlViTTextConfig(**a_ )
__snake_case : Union[str, Any] = OwlViTVisionConfig(**a_ )
__snake_case : List[str] = projection_dim
__snake_case : Optional[int] = logit_scale_init_value
__snake_case : List[str] = return_dict
__snake_case : List[Any] = 1.0
@classmethod
def SCREAMING_SNAKE_CASE (cls , a_ , **a_ ):
'''simple docstring'''
cls._set_token_in_kwargs(a_ )
__snake_case , __snake_case : int = cls.get_config_dict(a_ , **a_ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
@classmethod
def SCREAMING_SNAKE_CASE (cls , a_ , a_ , **a_ ):
'''simple docstring'''
__snake_case : str = {}
__snake_case : Dict = text_config
__snake_case : Optional[Any] = vision_config
return cls.from_dict(a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = copy.deepcopy(self.__dict__ )
__snake_case : Dict = self.text_config.to_dict()
__snake_case : Optional[int] = self.vision_config.to_dict()
__snake_case : Tuple = self.__class__.model_type
return output
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-4
def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = None , ):
'''simple docstring'''
__snake_case : Union[str, Any] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=a_ , seq_length=a_ , framework=a_ )
__snake_case : Any = super().generate_dummy_inputs(
processor.image_processor , batch_size=a_ , framework=a_ )
return {**text_input_dict, **image_input_dict}
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 14
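
# --- usage sketch (added for illustration) -----------------------------------
# Building a composite config from the pieces above, the same way
# from_text_vision_configs wires them together:
text_cfg = OwlViTTextConfig(hidden_size=512)
vision_cfg = OwlViTVisionConfig(patch_size=32)
cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
print(cfg.model_type, cfg.text_config.hidden_size, cfg.vision_config.patch_size)
# owlvit 512 32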
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
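
# --- usage sketch (added for illustration; run outside the library) -----------
# Aligning the template's label schema with a dataset's actual class names:
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification()
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['cat', 'dog']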
def reverse_words(input_str: str) -> str:
    """
    Reverses word order, e.g.
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional pruning parameters."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
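
# --- usage sketch (added for illustration) -----------------------------------
# Instantiating the config with the movement-pruning defaults from above:
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.model_type, config.pruning_method)  # masked_bert topK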
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
class CircularQueue:
    """Circular FIFO queue on a fixed-size array."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
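
# --- usage sketch (added for illustration) -----------------------------------
# FIFO behaviour with wrap-around on a capacity-3 queue:
q = CircularQueue(3)
q.enqueue("a").enqueue("b").enqueue("c")
print(q.dequeue())        # a
q.enqueue("d")            # rear wraps around to the freed slot
print(len(q), q.first())  # 3 b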
"""simple docstring"""
import torch
def _SCREAMING_SNAKE_CASE ( ) ->Optional[Any]:
'''simple docstring'''
if torch.cuda.is_available():
a : List[Any] = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_yolos_config( yolos_name ) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v( state_dict , config , base_model = False ) -> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key( name ) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
def convert_state_dict( orig_state_dict , model ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val

    return orig_state_dict
def prepare_img( ) -> "Image.Image":
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ) -> None:
    """simple docstring"""
    config = get_yolos_config(yolos_name )

    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits , expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )

    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 0 |
"""simple docstring"""
from __future__ import annotations
__UpperCamelCase : Dict = 1.6021e-19 # units = C
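

# The three quantities are related by: conductivity = electron_conc * mobility * ELECTRON_CHARGE.
# Exactly one of the arguments must be passed as 0; the function solves for that missing quantity
# and returns its name together with the computed value.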
def __SCREAMING_SNAKE_CASE ( conductivity , electron_conc , mobility , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 106 |
from math import ceil, sqrt
def solution( limit = 1000000 ) -> int:
    """simple docstring"""
    # counts square laminae (hollow square "frames") that use at most `limit` tiles
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
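

# BaseStreamer is the interface `generate()` talks to: new token ids arrive through put() and
# end() signals completion. TextStreamer prints decoded text as it arrives; TextIteratorStreamer
# exposes the same stream as a Python iterator backed by a queue.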
class BaseStreamer :
"""simple docstring"""
    def put( self , value ) -> None:
        raise NotImplementedError()

    def end( self ) -> None:
        raise NotImplementedError()
class TextStreamer (BaseStreamer ):
"""simple docstring"""
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs ) -> None:
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put( self , value ) -> None:
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(printable_text )

        self.on_finalized_text(printable_text )
    def end( self ) -> None:
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text( self , text : str , stream_end : bool = False ) -> None:
        print(text , flush=True , end="" if not stream_end else None )
    def _is_chinese_char( self , cp ) -> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e_00 and cp <= 0X9f_ff)
or (cp >= 0X34_00 and cp <= 0X4d_bf) #
or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) #
or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) #
or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) #
or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) #
or (cp >= 0Xf9_00 and cp <= 0Xfa_ff)
or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) #
): #
return True
return False
class TextIteratorStreamer (TextStreamer ):
"""simple docstring"""
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs ) -> None:
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text( self , text : str , stream_end : bool = False ) -> None:
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__( self ):
        return self
    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 107 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
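

# The reader tests below assume a parquet fixture with 4 rows and columns col_1 (string),
# col_2 (int64) and col_3 (float64); each test materializes a Dataset and checks shape and dtypes.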
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
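

# get_writer_batch_size picks a smaller parquet row-group size for media-heavy features
# (images/audio) so row groups stay memory-friendly when streaming; None means the library default.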
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
| 10 | 0 |
"""simple docstring"""
def binary_recursive( decimal : int ) -> str:
    '''simple docstring'''
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )


def main( number : str ) -> str:
    '''simple docstring'''
    # wrapper validating the input before delegating to binary_recursive
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f"""{negative}0b{binary_recursive(int(number ) )}"""
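

# Examples: binary_recursive(11) returns "1011"; main("-11") returns "-0b1011".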
if __name__ == "__main__":
from doctest import testmod
testmod()
| 108 |
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
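

# Standard HF model-tester layout: the helper class below builds a tiny config plus random inputs,
# and each create_and_check_* method instantiates one model head and asserts on the output shapes.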
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : Tuple = batch_size
UpperCAmelCase : Optional[int] = seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[Any] = intermediate_size
UpperCAmelCase : int = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : int = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Union[str, Any] = num_labels
UpperCAmelCase : Any = num_choices
UpperCAmelCase : str = scope
UpperCAmelCase : str = self.vocab_size - 1
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Tuple = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
UpperCAmelCase : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Tuple = OpenAIGPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = OpenAIGPTLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple = OpenAIGPTDoubleHeadsModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : str = self.num_labels
UpperCAmelCase : Optional[int] = OpenAIGPTForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Any = config_and_inputs
UpperCAmelCase : Dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__lowerCAmelCase : Union[str, Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__lowerCAmelCase : int = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : str = inputs_dict["""labels"""]
UpperCAmelCase : List[Any] = inputs_dict["""labels"""]
UpperCAmelCase : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = OpenAIGPTModelTester(self )
UpperCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] = OpenAIGPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # the president is
UpperCAmelCase : Optional[int] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase : List[Any] = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].tolist() , _SCREAMING_SNAKE_CASE )
| 109 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs , ) ->Union[str, Any]:
        '''simple docstring'''
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image , (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results
    def _sanitize_parameters(self , **kwargs) ->Dict:
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self , inputs) ->Any:
        '''simple docstring'''
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework)
            image_features = self.image_processor(image , return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self , model_inputs) ->Dict[str, Any]:
        '''simple docstring'''
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.1 , top_k=None) ->List[Dict[str, Any]]:
        '''simple docstring'''
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results , key=lambda x: x["score"] , reverse=True)
        if top_k:
            results = results[:top_k]

        return results
    def _get_bounding_box(self , box : "torch.Tensor") ->Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")

        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
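

# Minimal usage sketch (the checkpoint is resolved by the pipeline factory; any zero-shot
# object-detection model works):
#   detector = pipeline("zero-shot-object-detection")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote"])
#   -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]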
| 10 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
    """simple docstring"""
    try:
        with open(model_file , '''rb''' ) as flax_state_f:
            flax_state = from_bytes(pt_model , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.''' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )

    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )

    pt_model.base_model_prefix = ''''''

    flax_state_dict = flatten_dict(flax_state , sep='''.''' )
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''' )

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax stores (H, W, in_channels, out_channels); PyTorch expects (out, in, H, W)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernel: transpose (in_features, out_features) -> (out_features, in_features)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('''_0''' , '''.0''' )
                    .replace('''_1''' , '''.1''' )
                    .replace('''_2''' , '''.2''' )
                    .replace('''_3''' , '''.3''' )
                    .replace('''_4''' , '''.4''' )
                    .replace('''_5''' , '''.5''' )
                    .replace('''_6''' , '''.6''' )
                    .replace('''_7''' , '''.7''' )
                    .replace('''_8''' , '''.8''' )
                    .replace('''_9''' , '''.9''' )
                )

        flax_key = '''.'''.join(flax_key_tuple_array )

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )

    pt_model.load_state_dict(pt_model_dict )

    # re-transform missing_keys to list
    missing_keys = list(missing_keys )

    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )

    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ''' use it for predictions and inference.''' )

    return pt_model
| 110 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ={
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
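        # the expected variances below correspond to the default "fixed_small" variance type
        # with the linear beta schedule configured above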
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.dummy_model()
lowerCamelCase__: List[Any] =self.dummy_sample_deter
lowerCamelCase__: int =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Any =pred_prev_sample
lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase_ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
| 10 | 0 |
'''simple docstring'''
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__UpperCAmelCase = """."""

if __name__ == "__main__":
    doctest_file_path = os.path.join(__UpperCAmelCase, """utils/documentation_tests.txt""")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(__UpperCAmelCase, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = """\n""".join(non_existent_paths)
        raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
    if all_paths != sorted(all_paths):
        raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 323 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: int =9, 14 # noqa: F841
lowerCamelCase__: List[Any] =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
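    # build an undirected adjacency list from the weighted edge list, run Prim's algorithm,
    # and check that every expected MST edge appears in the result (in either orientation)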
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
lowerCamelCase__: Union[str, Any] =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 10 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class _lowerCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : int =MaMaaaTokenizer
UpperCAmelCase_ : int =False
UpperCAmelCase_ : Union[str, Any] =False
UpperCAmelCase_ : str =True
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["spm_file"] )

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Dict = "</s>"
__snake_case : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = self.get_tokenizer()
__snake_case : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(UpperCAmelCase_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Any = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [2, 3, 4, 5, 6] , )
__snake_case : List[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
__snake_case : int = tokenizer.convert_tokens_to_string(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , "This is a test" )
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = {"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] ="facebook/m2m100_418M"
UpperCAmelCase_ : List[Any] =[
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
UpperCAmelCase_ : List[str] =[
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
    UpperCAmelCase_ : str =[EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
    # fmt: on
@classmethod
def UpperCAmelCase ( cls ) -> Dict:
'''simple docstring'''
__snake_case : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
__snake_case : List[Any] = 1
return cls
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128063 )
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : str = "en"
__snake_case : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids )
# fmt: off
__snake_case : Dict = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
__snake_case : int = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
__snake_case : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = tempfile.mkdtemp()
__snake_case : Any = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCAmelCase_ )
__snake_case : Tuple = MaMaaaTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase_ )
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = "en"
__snake_case : Any = "fr"
__snake_case : Optional[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors="pt" )
__snake_case : List[str] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__snake_case : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__snake_case : List[Any] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : Any = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__snake_case : Tuple = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# en_XX, A, test, EOS
"input_ids": [[128022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128006,
} , )
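# Hedged usage sketch mirroring the assertions above. It assumes network access and the
# public `M2M100Tokenizer` class; the printed values are the ones the tests check for.
from transformers import M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ar")
enc = tok("A test", return_tensors="pt")
print(enc.input_ids.tolist())  # [[128022, 58, 4183, 2]] -> __en__, A, test, </s>
print(tok.get_lang_id("ar"))  # 128006, the forced_bos_token_id for Arabic generation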
| 326 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
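# Minimal sketch of the id lookup the last assertion exercises: pieces present in the
# monolingual vocab map to their ids, anything else falls back to <unk> (id 3 in this
# fixture). The mapping below is illustrative, not a real BARTpho vocabulary.
vocab = {"▁This": 4, "▁is": 5, "▁a": 6, "▁t": 7, "est": 8, "<unk>": 3}
tokens = ["▁This", "▁is", "▁a", "▁l", "à", "▁t", "est", "<unk>"]
print([vocab.get(t, vocab["<unk>"]) for t in tokens])  # [4, 5, 6, 3, 3, 7, 8, 3]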
| 10 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = BioGptTokenizer
lowercase = False
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
A_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
A_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = "lower newer"
A_ = "lower newer"
return input_text, output_text
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = BioGptTokenizer(self.vocab_file , self.merges_file )
A_ = "lower"
A_ = ["low", "er</w>"]
A_ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
A_ = tokens + ["<unk>"]
A_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A_ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase_ )
A_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase_ )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
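# Hand-applied BPE sketch for the toy vocab above (illustrative, not the tokenizer's
# internal merge loop): "lower" starts as characters plus an end-of-word marker, then
# the merges "l o", "lo w" and "e r</w>" are applied in order.
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
word = ["l", "o", "w", "e", "r</w>"]
for a, b in merges:
    i = 0
    while i < len(word) - 1:
        if (word[i], word[i + 1]) == (a, b):
            word[i : i + 2] = [a + b]
        else:
            i += 1
print(word)  # ['low', 'er</w>'] -> ids [14, 15] in the toy vocab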
| 162 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
                        lowerCamelCase__: Optional[Any] =name.split(key )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int:
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
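# Illustrative invocation of the script above; every path below is a placeholder, not a
# real checkpoint:
# python convert_checkpoint.py \
#     --checkpoint_path /path/to/unispeech.pt \
#     --pytorch_dump_folder_path ./unispeech-hf \
#     --dict_path /path/to/dict.ltr.txt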
| 10 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase : str = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase : Tuple = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowerCamelCase : Optional[Any] = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def _SCREAMING_SNAKE_CASE ( lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = None
# source code of `config_class`
lowerCamelCase_ = inspect.getsource(__a )
lowerCamelCase_ = _re_checkpoint.findall(__a )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
lowerCamelCase_ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase_ = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCamelCase_ = ckpt_name
break
return checkpoint
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCamelCase_ = get_checkpoint_from_config_class(__a )
lowerCamelCase_ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__a )
if len(__a ) > 0:
lowerCamelCase_ = "\n".join(sorted(__a ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
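# Quick self-contained check of the checkpoint regex defined above, on an illustrative
# docstring fragment:
import re

pat = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
sample = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(pat.findall(sample))  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]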
| 204 |
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
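# Self-contained sketch of the same Viterbi recurrence on the classic healthy/fever HMM;
# the names and probabilities are illustrative and re-express the function above compactly.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
probs = {(s, observations[0]): start_p[s] * emit_p[s][observations[0]] for s in states}
ptrs = {}
for o in range(1, len(observations)):
    for s in states:
        best = max(states, key=lambda k: probs[(k, observations[o - 1])] * trans_p[k][s])
        probs[(s, observations[o])] = (
            probs[(best, observations[o - 1])] * trans_p[best][s] * emit_p[s][observations[o]]
        )
        ptrs[(s, observations[o])] = best
last = max(states, key=lambda k: probs[(k, observations[-1])])
path = [last]
for o in range(len(observations) - 1, 0, -1):
    last = ptrs[(last, observations[o])]
    path.append(last)
path.reverse()
print(path)  # ['Healthy', 'Healthy', 'Fever']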
| 10 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class UpperCamelCase__ (__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase_ : int = """pix2struct_text_model"""
lowerCamelCase_ : Any = ["""past_key_values"""]
lowerCamelCase_ : str = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , UpperCamelCase__=5_0244 , UpperCamelCase__=768 , UpperCamelCase__=64 , UpperCamelCase__=2048 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=32 , UpperCamelCase__=128 , UpperCamelCase__=0.1 , UpperCamelCase__=1e-6 , UpperCamelCase__=1.0 , UpperCamelCase__="gelu_new" , UpperCamelCase__=0 , UpperCamelCase__=False , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]:
lowerCamelCase : Any = vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Tuple = d_kv
lowerCamelCase : Optional[int] = d_ff
lowerCamelCase : Any = num_layers
lowerCamelCase : int = num_heads
lowerCamelCase : Any = relative_attention_num_buckets
lowerCamelCase : List[str] = relative_attention_max_distance
lowerCamelCase : Optional[int] = dropout_rate
lowerCamelCase : Optional[int] = layer_norm_epsilon
lowerCamelCase : Dict = initializer_factor
lowerCamelCase : List[Any] = use_cache
lowerCamelCase : Union[str, Any] = eos_token_id
lowerCamelCase : List[Any] = decoder_start_token_id
# for backwards compatibility
lowerCamelCase : int = dense_act_fn
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase_ )
        lowerCamelCase , lowerCamelCase : List[Any] = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase : Optional[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class UpperCamelCase__ (__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = """pix2struct_vision_model"""
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=768 , UpperCamelCase__=2048 , UpperCamelCase__=64 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__="gelu_new" , UpperCamelCase__=1e-6 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=1.0 , UpperCamelCase__=4096 , UpperCamelCase__=32 , UpperCamelCase__=128 , **UpperCamelCase__ , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase_ )
lowerCamelCase : str = hidden_size
lowerCamelCase : Union[str, Any] = patch_embed_hidden_size
lowerCamelCase : str = d_ff
lowerCamelCase : Any = dropout_rate
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : str = initializer_range
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Union[str, Any] = attention_dropout
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : int = dense_act_fn
lowerCamelCase : Tuple = seq_len
lowerCamelCase : Optional[Any] = relative_attention_num_buckets
lowerCamelCase : int = relative_attention_max_distance
lowerCamelCase : Dict = d_kv
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase_ )
        lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class UpperCamelCase__ (__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """pix2struct"""
lowerCamelCase_ : List[str] = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=1.0 , UpperCamelCase__=0.02 , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ) -> List[str]:
super().__init__(tie_word_embeddings=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ )
if text_config is None:
lowerCamelCase : List[str] = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowerCamelCase : str = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowerCamelCase : int = PixaStructTextConfig(**UpperCAmelCase_ )
lowerCamelCase : int = PixaStructVisionConfig(**UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = self.text_config.decoder_start_token_id
lowerCamelCase : Tuple = self.text_config.pad_token_id
lowerCamelCase : List[str] = self.text_config.eos_token_id
lowerCamelCase : Union[str, Any] = initializer_factor
lowerCamelCase : str = initializer_range
lowerCamelCase : Optional[int] = self.initializer_range
lowerCamelCase : Dict = self.initializer_range
lowerCamelCase : str = is_vqa
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : int = copy.deepcopy(self.__dict__ )
lowerCamelCase : List[Any] = self.text_config.to_dict()
lowerCamelCase : List[str] = self.vision_config.to_dict()
lowerCamelCase : Dict = self.__class__.model_type
return output
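# Hedged usage sketch composing the combined config from the two sub-configs; it assumes
# the public transformers names Pix2StructTextConfig / Pix2StructVisionConfig /
# Pix2StructConfig and the from_text_vision_configs helper shown above.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.to_dict()["text_config"]["num_layers"])  # 2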
| 48 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
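# The property above multiplies the conv strides; with the default strides
# (5, 2, 2, 2, 2, 2, 2) it gives the feature extractor's overall downsampling factor:
import functools
import operator

print(functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1))  # 320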
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :List[Any] = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :List[Any] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
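# Minimal sketch of the lazy-import idea behind _LazyModule, using a PEP 562 module-level
# __getattr__; illustrative only, not the actual transformers implementation.
import importlib

_LAZY_ATTRS = {"IBertModel": ".modeling_ibert"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")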
| 159 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float:
"""simple docstring"""
lowerCamelCase__: str =a
while True:
lowerCamelCase__: Optional[Any] =Decimal(__a ) - (
Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__a ) ) < precision: # noqa: S307
return float(__a )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
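# Equivalent sketch without eval/sympy, hand-checked for f(x) = x**2 - 5 (root sqrt(5)):
# x1 = 2 - (4 - 5) / (2 * 2) = 2.25, and a few more steps converge to ~2.2360679...
x = 2.0
for _ in range(5):
    x = x - (x * x - 5) / (2 * x)
print(x)  # ~2.23606797749979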
| 10 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_6 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=6 , UpperCAmelCase=6 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=1_0_0_0 , ) -> Tuple:
_lowercase =parent
_lowercase =batch_size
_lowercase =num_channels
_lowercase =image_size
_lowercase =patch_size
_lowercase =text_seq_length
_lowercase =is_training
_lowercase =use_input_mask
_lowercase =use_token_type_ids
_lowercase =use_labels
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =type_vocab_size
_lowercase =type_sequence_label_size
_lowercase =initializer_range
_lowercase =coordinate_size
_lowercase =shape_size
_lowercase =num_labels
_lowercase =num_choices
_lowercase =scope
_lowercase =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowercase =text_seq_length
_lowercase =(image_size // patch_size) ** 2 + 1
_lowercase =self.text_seq_length + self.image_seq_length
def __A (self ) -> Any:
_lowercase =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowercase =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase =bbox[i, j, 3]
_lowercase =bbox[i, j, 1]
_lowercase =t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase =bbox[i, j, 2]
_lowercase =bbox[i, j, 0]
_lowercase =t
_lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase =None
if self.use_input_mask:
_lowercase =random_attention_mask([self.batch_size, self.text_seq_length] )
_lowercase =None
if self.use_token_type_ids:
_lowercase =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowercase =None
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowercase =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =LayoutLMvaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# text + image
_lowercase =model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
_lowercase =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
_lowercase =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
_lowercase =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowercase =model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowercase =model(pixel_values=UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
_lowercase =self.num_labels
_lowercase =LayoutLMvaForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_lowercase =self.num_labels
_lowercase =LayoutLMvaForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
_lowercase =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A (self ) -> Optional[Any]:
_lowercase =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) =config_and_inputs
_lowercase ={
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
return True
def __A (self ) -> Dict:
_lowercase =LayoutLMvaModelTester(self )
_lowercase =ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7 )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Optional[Any]:
_lowercase =copy.deepcopy(UpperCAmelCase_ )
if model_class in get_values(UpperCAmelCase_ ):
_lowercase ={
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase_ ):
_lowercase =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in get_values(UpperCAmelCase_ ):
_lowercase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
_lowercase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in [
*get_values(UpperCAmelCase_ ),
]:
_lowercase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in [
*get_values(UpperCAmelCase_ ),
]:
_lowercase =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , )
return inputs_dict
def __A (self ) -> int:
self.config_tester.run_common_tests()
def __A (self ) -> List[str]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def __A (self ) -> str:
_lowercase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase =type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def __A (self ) -> List[Any]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def __A (self ) -> Optional[Any]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def __A (self ) -> Optional[int]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def __A (self ) -> Union[str, Any]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =LayoutLMvaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCAmelCase_ ( ) -> Dict:
"""simple docstring"""
_lowercase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
@cached_property
def __A (self ) -> str:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) if is_vision_available() else None
@slow
def __A (self ) -> Optional[Any]:
_lowercase =LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCAmelCase_ )
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values.to(UpperCAmelCase_ )
_lowercase =torch.tensor([[1, 2]] )
_lowercase =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowercase =model(
input_ids=input_ids.to(UpperCAmelCase_ ) , bbox=bbox.to(UpperCAmelCase_ ) , pixel_values=pixel_values.to(UpperCAmelCase_ ) , )
# verify the logits
_lowercase =torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
_lowercase =torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
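# Hedged end-to-end sketch; it assumes network access and the public LayoutLMv3Processor
# API, and the words/boxes below are dummy values, as in the integration test above.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values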
| 5 |
import itertools
import math
def lowerCAmelCase_ ( __a ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
lowerCamelCase__: Optional[int] =2
while True:
if is_prime(__a ):
yield num
num += 1
def lowerCAmelCase_ ( __a = 10001 ) -> int:
"""simple docstring"""
return next(itertools.islice(prime_generator() , nth - 1 , __a ) )
if __name__ == "__main__":
print(f'{solution() = }')
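# Self-contained sanity check of the generator/islice pattern above; the helper below is
# local to this example, not the (renamed) definitions above.
import itertools

def primes():
    n = 2
    while True:
        if all(n % p for p in range(2, int(n**0.5) + 1)):
            yield n
        n += 1

print(next(itertools.islice(primes(), 5, 6)))  # 6th prime -> 13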
| 10 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple = 10**-10 ):
__UpperCAmelCase : str = a
while True:
__UpperCAmelCase : Optional[Any] = Decimal(__a ) - (
Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__a ) ) < precision: # noqa: S307
return float(__a )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
# Find Square Root of 5
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 114 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30}
lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30}
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =min_resolution
lowerCamelCase__: Union[str, Any] =max_resolution
lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop
lowerCamelCase__: Optional[int] =size
lowerCamelCase__: str =crop_pct
lowerCamelCase__: Any =crop_size
lowerCamelCase__: List[str] =do_normalize
lowerCamelCase__: List[str] =image_mean
lowerCamelCase__: Tuple =image_std
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
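# A minimal usage sketch of the image processor exercised by the tests above
# (constructor arguments mirror the tester defaults; the output shape follows
# the configured crop size):
if __name__ == "__main__":
    processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
    demo_image = Image.new("RGB", (64, 48))
    pixel_values = processor(demo_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 30, 30])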
| 10 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case_ : Optional[int] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
snake_case_ : Optional[int] = F'''[unused{i}]'''
snake_case_ : int = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case_ : int = 12
snake_case_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_ )
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
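# A short usage sketch for the tokenizer above. It needs the `sentencepiece`
# package and network access; the checkpoint name is the one listed in
# PRETRAINED_VOCAB_FILES_MAP, and the printed pieces are not asserted here.
if __name__ == "__main__":
    tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    ids = tokenizer("Hello world")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(ids))  # SentencePiece pieces followed by "[SEP]"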
| 279 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
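# For reference, the special-token layout the two methods above implement:
#   single sequence:  [CLS] A [SEP]          -> token_type_ids: 0 0 ... 0
#   sequence pair:    [CLS] A [SEP] B [SEP]  -> token_type_ids: 0 ... 0 | 1 ... 1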
| 10 | 0 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the (row-wise) softmax distribution over logits x."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
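# Why this equals the softmax entropy: with p_i = exp(x_i) / A and A = sum_j exp(x_j),
#   H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A)) = log(A) - (sum_i x_i * exp(x_i)) / A,
# which is exactly log(A) - B / A as computed above.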
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # Accept either one scalar threshold for every layer or a per-layer list.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}"""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway head: a shortcut from an intermediate BertLayer output to the classification loss."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
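# A minimal sketch of driving the early-exit path above at inference time.
# The config sizes and the 0.5 threshold are illustrative, not from the
# original file; the class and method names match the definitions above.
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(num_hidden_layers=4, num_labels=2)
    model = DeeBertForSequenceClassification(config)  # randomly initialized, for demonstration only
    model.eval()
    model.bert.encoder.set_early_exit_entropy(0.5)  # allow exit once a layer's softmax entropy drops below 0.5
    outputs = model(input_ids=torch.tensor([[101, 7592, 2088, 102]]))
    print(outputs[0].shape, outputs[-1])  # logits shape and the layer index the model exited at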
| 311 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
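# The JIT-enabled/JIT-disabled comparison used in test_encode/test_decode above,
# reduced to a self-contained sketch (the function below is illustrative):
if __name__ == "__main__":
    @jax.jit
    def scaled_sum(x):
        return jnp.sum(x * 2.0)

    x = jnp.arange(8.0)
    with_jit = scaled_sum(x)          # traced and compiled on first call
    with jax.disable_jit():
        without_jit = scaled_sum(x)   # runs eagerly, op by op
    assert jnp.allclose(with_jit, without_jit)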
| 10 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCamelCase : Optional[int] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 336 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[float], capacity: int) -> tuple[float, list[float]]:
    """
    Greedy fractional knapsack: take items in decreasing value/weight ratio and
    split the first item that no longer fits whole.

    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
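
    # A quick demonstration in addition to the doctests (numbers are illustrative):
    # ratios are 6, 5, 4, so items 1 and 2 are taken whole and two thirds of item 3 fill the rest.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value, fractions)  # 240.0 [1, 1, 0.6666666666666666]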
| 323 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None) -> np.ndarray:
        # Pad the bottom/right edges symmetrically so height and width become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
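# Quick check of the padding arithmetic in `pad` above: with pad_size=8, a
# 510x509 input is padded to the next multiples of 8 on each axis (and an
# exact multiple would still gain one full extra block of 8):
if __name__ == "__main__":
    demo_image = np.zeros((3, 510, 509))
    demo_processor = Swin2SRImageProcessor(do_rescale=False, pad_size=8)
    demo_pixel_values = demo_processor.preprocess(demo_image, return_tensors="np").pixel_values
    print(demo_pixel_values.shape)  # (1, 3, 512, 512)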
| 326 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
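# A minimal sketch (not part of the file above) of what the lazy structure buys
# you: importing the package is cheap, and heavy backends load only on first
# attribute access, resolved through _LazyModule.__getattr__.
#
#   import transformers.models.distilbert as distilbert
#   config_cls = distilbert.DistilBertConfig  # triggers the lazy import
#   print(config_cls.model_type)              # "distilbert"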
| 10 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self) -> None:
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self) -> None:
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self) -> None:
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self) -> None:
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self) -> None:
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self) -> None:
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker) -> None:
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self) -> None:
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self) -> None:
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
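# Illustrative usage of the pipeline exercised above (shown as a comment since
# this test module uses a relative import; scores depend on the checkpoint):
#
#   from transformers import pipeline
#   unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
#   print(unmasker("My name is <mask>"))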
| 162 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
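# Illustrative usage (shown as a comment because this module uses relative
# imports and cannot run standalone; the label names are made up):
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   aligned = ImageClassification().align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["cat", "dog"]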
| 10 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 204 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating the `BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
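# Illustrative only (not part of the original file): instantiating the config
# with a couple of non-default values; the pruning-method string is just an
# example value stored on the config.
if __name__ == "__main__":
    config = MaskedBertConfig(num_hidden_layers=4, pruning_method="magnitude")
    print(config.num_hidden_layers, config.pruning_method)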
| 10 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating the `BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 48 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it, or False if empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
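# Illustrative usage of the fixed-size circular queue above (not part of the
# original file).
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")
    print(len(queue))  # 3
    print(queue.dequeue())  # "a"
    queue.enqueue("d")  # wraps around into the freed slot
    print(queue.first())  # "b"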
| 10 | 0 |
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Find the most likely sequence of hidden states for the given observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
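# Illustrative only (not part of the original file): a tiny two-state HMM with
# made-up probabilities, run through the viterbi function above.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # -> ['healthy', 'healthy', 'sick'] for these probabilities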
| 159 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
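# Example invocation (illustrative; the script filename and paths below are
# placeholders, not part of the original file):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small-converted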
| 10 | 0 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple:
        """Compute the common substring of the node's prefix and the word.

        Returns (matching part, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 5 |
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 173: count the square laminae that can be formed
    using up to `limit` square tiles."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
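# Illustrative cross-check (added; not part of the original solution): a brute
# force count of square laminae using up to `limit` tiles, for small limits.
def _brute_force(limit: int) -> int:
    count = 0
    outer_width = 3
    while 4 * outer_width - 4 <= limit:  # thinnest lamina for this width still fits
        for hole_width in range(outer_width - 2, 0, -2):
            if outer_width**2 - hole_width**2 > limit:
                break
            count += 1
        outer_width += 1
    return count


if __name__ == "__main__":
    assert solution(1_000) == _brute_force(1_000)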
| 10 | 0 |
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Logistic (sigmoid) activation."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy loss."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit the weight vector by batch gradient descent on the cross-entropy loss."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
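    # Illustrative cross-check (added; not in the original script): compare the
    # handwritten fit with scikit-learn's LogisticRegression on the same data.
    # Coefficients will differ somewhat because sklearn regularizes by default.
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression(C=1e5).fit(x, y)
    print("sklearn coef:", clf.coef_.ravel(), "handwritten theta:", theta)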
| 114 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
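# Illustrative round-trip (added as an extra example, not one of the original
# tests): writing a Dataset to Parquet with the public API and reading it back.
def test_parquet_round_trip_example(tmp_path):
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    dataset.to_parquet(str(tmp_path / "example.parquet"))
    reloaded = Dataset.from_parquet(str(tmp_path / "example.parquet"))
    assert reloaded.column_names == dataset.column_names
    assert reloaded.num_rows == dataset.num_rows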
| 10 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''nielsr/canine-s''': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCAmelCase_ = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0xe0_00
lowerCAmelCase_ = 0xe0_01
lowerCAmelCase_ = 0xe0_02
lowerCAmelCase_ = 0xe0_03
lowerCAmelCase_ = 0xe0_04
# Maps special codepoints to human-readable names.
lowerCAmelCase_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '''[CLS]''',
SEP: '''[SEP]''',
BOS: '''[BOS]''',
MASK: '''[MASK]''',
PAD: '''[PAD]''',
RESERVED: '''[RESERVED]''',
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            model_max_length=model_max_length, **kwargs, )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write; it operates directly on Unicode codepoints.
        return ()
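
# A minimal usage sketch (not from the original file; the codepoint values are
# the module constants defined above):
#
#     tokenizer = CanineTokenizer()
#     tokenizer("hi")["input_ids"]   # [57344, 104, 105, 57345] = [CLS, 'h', 'i', SEP]
#     tokenizer.convert_tokens_to_string(["h", "i"])   # "hi"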
| 279 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
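
# Sketch of what the _LazyModule pattern above buys (names are from this
# module): importing the package stays cheap because heavy backends are not
# touched, and the real submodule import only happens on attribute access:
#
#     from transformers.models import groupvit   # no torch/TF import yet
#     model_cls = groupvit.GroupViTModel          # triggers the actual import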
| 311 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
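
# A hedged usage sketch (the OWL-ViT checkpoint name is an assumption; the
# "zero-shot-object-detection" task string matches the pipeline defined above):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]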
| 10 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
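
# Minimal usage sketch: the defaults mirror the abeja/gpt-neox-japanese-2.7b
# checkpoint referenced above, and any field can be overridden as a keyword:
#
#     config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
#     config.vocab_size   # 32000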
| 336 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ):
            scheduler.set_timesteps(timesteps=timesteps)
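
# Hedged sketch of what batch_step_no_noise enables (names as in the tests
# above): several timesteps of several samples are denoised in one flattened
# call instead of a Python loop over scheduler.step:
#
#     residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
#     prev = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))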
| 10 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""yjernite/retribert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
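
# Hedged usage sketch (the checkpoint name comes from the maps above):
#
#     tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     tok("a query", "a passage")["input_ids"]   # [CLS] query [SEP] passage [SEP]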
| 323 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """Ensure Prim's algorithm recovers the expected minimum spanning tree."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])
    result = mst(adjancency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
| 10 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/xglm-564M''': 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words_ids = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
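
# Illustration of the id alignment handled above (a sketch, not a doctest):
# with fairseq_offset = 1, sentencepiece id 3 (the first "real" piece) becomes
# fairseq id 4, while ids 0-3 stay reserved for <s>, <pad>, </s>, <unk> via
# fairseq_tokens_to_ids, and the madeup words are appended after the spm vocab.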
| 326 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 10 | 0 |
"""Add two integers without the + operator, using only bitwise operations."""
def add(first: int, second: int) -> int:
    while second != 0:
        carry = first & second  # bits that generate a carry
        first ^= second         # partial sum without carries
        second = carry << 1     # propagate the carry one position left
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = int(input('''Enter the first number: ''').strip())
__lowerCamelCase = int(input('''Enter the second number: ''').strip())
print(f"""{add(first, second) = }""")
| 162 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
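
# Example invocation (a sketch: the flags are exactly the ones registered
# above; the script name and paths are placeholders):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.json \
#       --pytorch_dump_folder_path ./unispeech-converted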
| 10 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
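
# Hedged usage sketch (the checkpoint name comes from the maps above):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     tok("summarize me")["input_ids"]   # token ids ending in eos_token_id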
| 204 |
from typing import Any
def viterbi(observations_space: list, states_space: list, initial_probabilities: dict, transition_probabilities: dict, emission_probabilities: dict, ) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
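
    # A small worked example (assumed values, the classic healthy/sick HMM):
    # the most likely state sequence for ("normal", "cold", "dizzy") is
    # ["healthy", "healthy", "sick"].
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))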
| 10 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([1_2000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([1_2000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg="`processor` and `image_processor`+`feature_extractor` model input names do not match", )
| 48 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
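
# Illustrative note (an addition, not part of the original file): with the default
# strides (5, 2, 2, 2, 2, 2, 2) the property above evaluates to 5 * 2**6 = 320,
# i.e. the feature encoder emits one frame per 320 raw samples (~20 ms at 16 kHz).
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 320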
| 10 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single Pascal's-triangle row, using O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
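
# Optional cross-check (added, not in the original snippet): Python 3.8+ ships
# math.comb, which should agree with the Pascal's-triangle computation above.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252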
| 159 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in x) starting from the point ``a``."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
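

# Hedged alternative sketch (an addition): the eval()-based loop above is unsafe for
# untrusted input. sympy can compile the expression and its derivative once instead;
# the function name below is illustrative and not part of the original snippet.
def newton_raphson_lambdified(expr: str, start: float, precision: float = 10**-10) -> float:
    from sympy import lambdify, symbols

    x_sym = symbols("x")
    f = lambdify(x_sym, expr)
    f_prime = lambdify(x_sym, diff(expr, x_sym))
    x = float(start)
    while abs(f(x)) >= precision:
        x -= f(x) / f_prime(x)
    return x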
| 10 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with jieba, then SentencePiece (adapted from XLNet's tokenizer)."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __A (self ) -> Optional[int]:
return len(self.sp_model )
def __A (self ) -> Union[str, Any]:
_lowercase ={self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Optional[Any]:
_lowercase =self.__dict__.copy()
_lowercase =None
return state
def __setstate__(self , UpperCAmelCase ) -> int:
_lowercase =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowercase ={}
_lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A (self , UpperCAmelCase ) -> Union[str, Any]:
if self.remove_space:
_lowercase =" ".join(inputs.strip().split() )
else:
_lowercase =inputs
_lowercase =outputs.replace('''``''' , '''\"''' ).replace('''\'\'''' , '''\"''' )
if not self.keep_accents:
_lowercase =unicodedata.normalize('''NFKD''' , UpperCAmelCase_ )
_lowercase ="".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
_lowercase =outputs.lower()
return outputs
def __A (self , UpperCAmelCase ) -> List[str]:
_lowercase =self.preprocess_text(UpperCAmelCase_ )
_lowercase =self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
_lowercase =[]
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_lowercase =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowercase =cur_pieces[1:]
else:
_lowercase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def __A (self , UpperCAmelCase ) -> Tuple:
return self.sp_model.PieceToId(UpperCAmelCase_ )
def __A (self , UpperCAmelCase ) -> List[Any]:
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def __A (self , UpperCAmelCase ) -> Tuple:
_lowercase ="".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , ''' ''' ).strip()
return out_string
def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
_lowercase =[self.sep_token_id]
_lowercase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __A (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1]
return ([0] * len(UpperCAmelCase_ )) + [1, 1]
def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
_lowercase =[self.sep_token_id]
_lowercase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowercase =os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , '''wb''' ) as fi:
_lowercase =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def __A (self , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
_lowercase =super()._decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
_lowercase =text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
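

if __name__ == "__main__":
    # Illustrative round-trip check (an addition, not part of the original file):
    # CPM encodes " " as "\u2582" and "\n" as "\u2583" before tokenization (the
    # translator built in __init__), and _decode above reverses that mapping.
    translator = str.maketrans(" \n", "\u2582\u2583")
    assert "a b\nc".translate(translator) == "a\u2582b\u2583c"
    assert "a\u2582b\u2583c".replace("\u2582", " ").replace("\u2583", "\n") == "a b\nc"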
| 5 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Check primality using the 6k ± 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f'{solution() = }')
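
    # Optional sanity checks (added, not in the original snippet).
    assert list(itertools.islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
    assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13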
| 10 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted halves input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of ``input_list`` via iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
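
    # Deterministic self-check (added, not in the original snippet): the result
    # should match Python's built-in sorted().
    sample = [5, 9, 10, 3, -4, 5, 178, 92, 46, -18, 0, 7]
    assert iter_merge_sort(sample) == sorted(sample)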
| 114 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 10 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """
    >>> boards = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row
    # of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a collision in the vertical direction. Then we apply the
        # two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
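
    # Known-value checks (added, not in the original snippet): the 4-queens puzzle
    # has exactly 2 solutions and the classic 8-queens puzzle has 92.
    for size, expected in ((4, 2), (8, 92)):
        found: list[list[str]] = []
        depth_first_search([], [], [], found, size)
        assert len(found) == expected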
| 279 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 10 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Base class every transformers CLI subcommand is expected to implement."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
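

# Minimal concrete sketch (an addition, not part of the original file): how a
# subcommand typically plugs in. ``EchoCommand`` and its flag are hypothetical,
# and ``parser`` is assumed to be the sub-parsers action from add_subparsers().
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="Hypothetical demo subcommand.")
        echo_parser.add_argument("--text", type=str, default="hello", help="Text to print.")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)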
| 311 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__: Optional[Any] =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase__: Dict =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase__: Optional[Any] =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__: Any =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__: List[str] =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Any=0.02 , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: List[str] =batch_size
lowerCamelCase__: Optional[int] =seq_length
lowerCamelCase__: Optional[Any] =is_training
lowerCamelCase__: str =use_labels
lowerCamelCase__: Optional[Any] =vocab_size
lowerCamelCase__: int =hidden_size
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: int =hidden_act
lowerCamelCase__: Tuple =hidden_dropout_prob
lowerCamelCase__: List[str] =attention_probs_dropout_prob
lowerCamelCase__: Optional[int] =max_position_embeddings
lowerCamelCase__: int =eos_token_id
lowerCamelCase__: Union[str, Any] =pad_token_id
lowerCamelCase__: List[str] =bos_token_id
lowerCamelCase__: int =initializer_range
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowerCamelCase__: str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowerCamelCase__: int =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: Dict =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , )
lowerCamelCase__: Any =prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Dict =self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =20
lowerCamelCase__: Optional[int] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: str =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: List[Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: Union[str, Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: Dict =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =20
lowerCamelCase__: Optional[Any] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: Any =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Optional[int] =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: List[Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: str =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_)
lowerCamelCase__: str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase__: Optional[Any] =input_ids.shape[0]
lowerCamelCase__: List[str] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data()
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_)
lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(UpperCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =FlaxBlenderbotModelTester(self)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
@jax.jit
def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
with self.subTest("JIT Enabled"):
lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
lowerCamelCase__: int ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest("JIT Enabled"):
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id
lowerCamelCase__: str =model(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_)
lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
lowerCamelCase__: Any =["Sam"]
lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax")
lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic."
lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 10 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCAmelCase : Optional[int] = "lm_head"
UpperCAmelCase : Dict = getattr(__a , __a )
if weight_type is not None:
UpperCAmelCase : str = getattr(__a , __a ).shape
else:
UpperCAmelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : int = value
elif weight_type == "bias":
UpperCAmelCase : List[str] = value
else:
UpperCAmelCase : Union[str, Any] = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
UpperCAmelCase : List[Any] = []
UpperCAmelCase : List[str] = fairseq_model.state_dict()
UpperCAmelCase : Optional[int] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : int = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase : List[str] = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : Optional[Any] = name.split(__a )[0].split('''.''' )[-2]
UpperCAmelCase : List[str] = mapped_key.replace('''*''' , __a )
if "weight_g" in name:
UpperCAmelCase : List[str] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase : Union[str, Any] = "weight_v"
elif "bias" in name:
UpperCAmelCase : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : Tuple = "weight"
else:
UpperCAmelCase : List[Any] = None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
UpperCAmelCase : Tuple = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase : List[str] = name.split('''.''' )
UpperCAmelCase : str = int(items[0] )
UpperCAmelCase : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase : List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
if config_path is not None:
UpperCAmelCase : str = UniSpeechConfig.from_pretrained(__a )
else:
UpperCAmelCase : List[Any] = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase : str = Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : Any = target_dict.pad_index
UpperCAmelCase : int = target_dict.bos_index
UpperCAmelCase : Any = target_dict.eos_index
UpperCAmelCase : Dict = len(target_dict.symbols )
UpperCAmelCase : Optional[int] = os.path.join(__a , '''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : Optional[Any] = 42
UpperCAmelCase : List[Any] = 43
with open(__a , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__a , __a )
UpperCAmelCase : List[str] = WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__a , )
UpperCAmelCase : Dict = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
UpperCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
UpperCAmelCase : int = UniSpeechForCTC(__a )
else:
UpperCAmelCase : int = UniSpeechForPreTraining(__a )
if is_finetuned:
UpperCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
UpperCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase : List[str] = model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
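
# Illustrative invocation (added, not part of the original script; all paths
# below are placeholders, not real files):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path /path/to/config.json
# Omitting --not_finetuned treats the checkpoint as fine-tuned, so a CTC head is
# kept and a processor (feature extractor + phoneme tokenizer) is saved as well.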
| 336 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None:
'''simple docstring'''
lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
lowerCamelCase__: Optional[int] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
def __getstate__(self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.__dict__.copy()
lowerCamelCase__: Dict =None
return state
def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: Dict ={}
lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_)) + [1]
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Any =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
    def save_vocabulary(self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self : List[str] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None) ->List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
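# Approximates the arc length of y = fnc(x) over [x_start, x_end] by summing `steps` straight-line segments.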
def line_length( fnc: Callable[[float], float] , x_start: float , x_end: float , steps: int = 1_00 , ) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        length += math.hypot(xa_next - xa , fxa_next - fxa )
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length


if __name__ == "__main__":

    def f(x: float ) -> float:
        """simple docstring"""
        return math.sin(10 * x )

    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
    while i <= 100000:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
| 323 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
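# _import_structure maps each submodule to its public names; _LazyModule defers the real imports until first attribute access.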
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class _lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ) -> Tuple:
'''simple docstring'''
super().setUp()
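        # build a tiny monolingual vocabulary on disk so the tokenizer can be saved and reloaded in the tests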
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ) -> str:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> List[Any]:
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self ) -> Optional[Any]:
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 326 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( DiffusionPipeline ):
    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> List[Any]:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("""steps_offset!=1""" , """1.0.0""" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
A_ = dict(scheduler.config )
A_ = 1
A_ = FrozenDict(UpperCAmelCase_ )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
A_ = dict(scheduler.config )
A_ = True
A_ = FrozenDict(UpperCAmelCase_ )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def snake_case_ ( self , UpperCamelCase__ = "auto" ) -> List[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_ )
    def disable_attention_slicing( self ) -> Tuple:
        '''simple docstring'''
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ) -> List[Any]:
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device("""cuda""" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ) -> str:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
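    # __call__ chains two stages: CLIPSeg produces a segmentation mask from the text prompt, then Stable Diffusion inpaints inside it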
@torch.no_grad()
    def __call__( self , prompt , image , text , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 162 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class _SCREAMING_SNAKE_CASE ( TaskTemplate ):
'''simple docstring'''
lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"image": Image()} )
lowercase_ = Features({"labels": ClassLabel} )
lowercase_ = "image"
lowercase_ = "labels"
    def align_with_features(self : Tuple , features : Union[str, Any]) ->Tuple:
'''simple docstring'''
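        # swap the generic ClassLabel placeholder for the dataset's actual label feature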
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column] , ClassLabel):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
    def column_mapping(self : Dict) ->Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 10 | 0 |
def and_gate(input_1: int , input_2: int ) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0 ) == 0 )


def test_and_gate() -> None:
    '''simple docstring'''
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 204 |
import logging
from transformers.configuration_utils import PretrainedConfig
__A = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
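    # BERT configuration extended with movement-pruning options (pruning_method, mask_init, mask_scale)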
lowercase_ = "masked_bert"
    def __init__(self : Dict , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ) ->List[Any]:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__(self : Optional[Any] , n : int) ->Optional[int]:
'''simple docstring'''
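        # fixed-capacity FIFO ring buffer: front and rear indices wrap around modulo n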
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
def __len__(self : str) ->int:
'''simple docstring'''
return self.size
    def is_empty(self : int) ->bool:
        '''simple docstring'''
        return self.size == 0
    def first(self : List[str]):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self : int , data : Optional[int]):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self : Tuple):
        '''simple docstring'''
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 10 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[str] = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
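# BLIP-2 is described by three nested configurations: a ViT-style vision encoder, a Q-Former bridge, and a language model.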
class BlipaVisionConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'blip_2_vision_model'
    def __init__( self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=0.00_001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'blip_2_qformer'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ) -> str:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type" ) == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'blip-2'
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs ) -> int:
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs( cls , vision_config : BlipaVisionConfig , qformer_config : BlipaQFormerConfig , text_config : PretrainedConfig , **kwargs , ) -> int:
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ) -> List[str]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
| 159 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
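# Converts the original (timm-style) YOLOS checkpoints into the Hugging Face YolosForObjectDetection format.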
def get_yolos_config(yolos_name ) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict , config , base_model = False ) -> Dict:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(name ) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm" )
    return name
def convert_state_dict(orig_state_dict , model ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ) -> List[str]:
    """simple docstring"""
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        expected_slice_boxes = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        expected_slice_boxes = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        expected_slice_boxes = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        expected_slice_boxes = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        expected_slice_boxes = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
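# virtual entries marking the inclusive range of arrow-key codes, used when decoding escape sequences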
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """simple docstring"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['''esc'''] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    """simple docstring"""
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 5 |
from math import ceil, sqrt
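# Counts the hollow square laminae (square frames at least one tile thick) that can be tiled with up to `limit` unit squares (cf. Project Euler 173).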
def solution(limit: int = 1000000 ) -> int:
    """simple docstring"""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
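# Every kwarg below must differ from PretrainedConfig's default value so that overridden settings can be detected in the tests.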
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester ( unittest.TestCase ):
"""simple docstring"""
@classmethod
    def setUpClass( cls : Optional[Any] ) -> str:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls : List[str] ) -> str:
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
    def test_push_to_hub( self : List[str] ) -> Union[str, Any]:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("""test-config""" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-config""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id="""test-config""" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self : Any ) -> Tuple:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("""valid_org/test-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-config-org""" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("""valid_org/test-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_dynamic_config( self : Optional[Any] ) -> Optional[int]:
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
        new_config = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
        self.assertEqual(new_config.attribute , 42 )
class ConfigTestUtils ( unittest.TestCase ):
"""simple docstring"""
    def test_config_from_string( self : List[Any] ) -> Dict:
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd , c.n_embd , """mismatch for key: n_embd""" )
        self.assertEqual(resid_pdrop , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
        self.assertEqual(summary_type , c.summary_type , """mismatch for key: summary_type""" )
    def test_config_common_kwargs_is_complete( self : List[Any] ) -> Union[str, Any]:
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                """The following keys are set with the default values in"""
                """ `test_configuration_common.config_common_kwargs` pick another value for them:"""
                f""" {", ".join(keys_with_defaults )}.""" )
    def test_from_pretrained_subfolder( self : List[Any] ) -> Any:
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
        self.assertIsNotNone(config )
    def test_cached_files_are_used_when_internet_is_down( self : List[str] ) -> Union[str, Any]:
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self : List[Any] ) -> Union[str, Any]:
        _ = BertConfig.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
    def test_local_versioning( self : Tuple ) -> int:
        configuration = AutoConfig.from_pretrained("""bert-base-cased""" )
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , """config.4.0.0.json""" ) , """w""" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , """config.4.0.0.json""" ) , os.path.join(tmp_dir , """config.42.0.0.json""" ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def test_repo_versioning_before( self : Dict ) -> Optional[Any]:
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 114 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
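# Round-trip tests for the Parquet reader/writer: the fixture files always hold 4 rows with columns col_1..col_3.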
def _check_parquet_dataset(dataset , expected_features ) -> Optional[Any]:
    """simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path ) -> List[Any]:
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features , parquet_path , tmp_path ) -> List[Any]:
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split , parquet_path , tmp_path ) -> Any:
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type , parquet_path , tmp_path ) -> Dict:
    """simple docstring"""
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory , parquet_path , tmp_path ) -> List[Any]:
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path ) -> List[Any]:
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
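

# Illustrative sketch (not from the original suite): a minimal pytest fixture that could
# provide the `parquet_path` used above. The real fixture lives in the suite's conftest;
# this pyarrow-based version is an assumption for illustration only.
import pyarrow as pa


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    # four rows with the col_1/col_2/col_3 schema that _check_parquet_dataset expects
    table = pa.Table.from_pydict(
        {"col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
    )
    path = str(tmp_path_factory.mktemp("data") / "file.parquet")
    pq.write_table(table, path)
    return path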
| 10 | 0 |
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"


def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
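

def _example_load_converted_model(dump_dir="path/to/pytorch_dump_folder"):
    # Illustrative sketch (assumption, not part of the original script): load the converted
    # checkpoint back through the transformers API and pick the language adapter to route
    # through, as the verification step above does. The directory path is a placeholder.
    model = XmodForMaskedLM.from_pretrained(dump_dir)
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    return model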
| 279 |
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
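

def _example_diffedit_flow(pipe, image, source_prompt, target_prompt):
    # Illustrative sketch (assumption, not from the original tests): the three-step DiffEdit
    # flow the integration tests exercise -- generate an edit mask from the two prompts,
    # invert the image into latents, then denoise with the mask and the inverted latents.
    mask_image = pipe.generate_mask(image=image, source_prompt=source_prompt, target_prompt=target_prompt)
    inv_latents = pipe.invert(prompt=source_prompt, image=image).latents
    return pipe(
        prompt=target_prompt,
        mask_image=mask_image,
        image_latents=inv_latents,
        negative_prompt=source_prompt,
    ).images[0]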
| 311 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: detects objects matching free-text candidate labels
    in an image, using models such as OWL-ViT.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
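

def _example_zero_shot_detection(image_url, labels=("cat", "remote")):
    # Illustrative usage sketch (assumption, not part of this module): the class above is
    # normally instantiated through `pipeline(...)`; "google/owlvit-base-patch32" is one
    # public checkpoint that supports this task.
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    return detector(image_url, candidate_labels=list(labels))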
| 10 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid-weighted linear unit; with the 1.702 factor this approximates GELU."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
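

def _example_activation_values():
    # Quick numeric sanity check (illustration, not from the original file): sigmoid(0) is
    # exactly 0.5, so the sigmoid-weighted unit at 0 evaluates to 0 * 0.5 = 0.
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))              # ~ [0.2689, 0.5, 0.7311]
    print(sigmoid_linear_unit(x))  # ~ [-0.1542, 0.0, 0.8458]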
| 336 |
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
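

def _example_ddpm_parallel_loop(model, sample, num_train_timesteps=1000):
    # Illustrative sketch (assumption, not from the original tests): the plain ancestral
    # sampling loop that test_full_loop_no_noise exercises, written against the public
    # scheduler API.
    scheduler = DDPMParallelScheduler(num_train_timesteps=num_train_timesteps)
    generator = torch.manual_seed(0)
    for t in reversed(range(num_train_timesteps)):
        residual = model(sample, t)  # predict the noise residual
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample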
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
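

def _example_tf_auto_usage(model_name="bert-base-cased"):
    # Illustrative usage sketch (assumption, not part of the test file): the auto-API the
    # tests above cover, in its simplest form -- config and model resolved from one name.
    config = AutoConfig.from_pretrained(model_name)
    model = TFAutoModel.from_pretrained(model_name)
    return config, model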
| 323 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        # the graph is undirected, so record each edge in both directions
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
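

def _example_expected_mst_weight():
    # Illustrative addition (not from the original test): the expected tree spans 9 nodes,
    # so it has 9 - 1 = 8 edges, and its total weight can be checked directly.
    expected = [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]
    assert len(expected) == 9 - 1
    assert sum(cost for *_, cost in expected) == 37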
| 10 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ : Any =["image_processor"]
UpperCAmelCase_ : Optional[Any] ="SamImageProcessor"
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
__snake_case : Union[str, Any] = self.image_processor
__snake_case : str = -10
__snake_case : Tuple = self.image_processor.size["longest_edge"]
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = None , **UpperCAmelCase , ) -> BatchEncoding:
'''simple docstring'''
__snake_case : Any = self.image_processor(
UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
# pop arguments that are not used in the foward but used nevertheless
__snake_case : Tuple = encoding_image_processor["original_sizes"]
if hasattr(UpperCAmelCase_ , "numpy" ): # Checks if Torch or TF tensor
__snake_case : Optional[Any] = original_sizes.numpy()
__snake_case : Any = self._check_and_preprocess_points(
input_points=UpperCAmelCase_ , input_labels=UpperCAmelCase_ , input_boxes=UpperCAmelCase_ , )
__snake_case : Optional[int] = self._normalize_and_convert(
UpperCAmelCase_ , UpperCAmelCase_ , input_points=UpperCAmelCase_ , input_labels=UpperCAmelCase_ , input_boxes=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , )
return encoding_image_processor
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , ) -> List[str]:
'''simple docstring'''
if input_points is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
__snake_case : int = [
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , original_sizes[0] ) for point in input_points
]
else:
__snake_case : Tuple = [
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , UpperCAmelCase_ )
for point, original_size in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__snake_case : Union[str, Any] = self._pad_points_and_labels(UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case : Optional[Any] = np.array(UpperCAmelCase_ )
if input_labels is not None:
__snake_case : Tuple = np.array(UpperCAmelCase_ )
if input_boxes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
__snake_case : Union[str, Any] = [
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , original_sizes[0] , is_bounding_box=UpperCAmelCase_ )
for box in input_boxes
]
else:
__snake_case : List[Any] = [
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , UpperCAmelCase_ , is_bounding_box=UpperCAmelCase_ )
for box, original_size in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
__snake_case : Optional[int] = np.array(UpperCAmelCase_ )
if input_boxes is not None:
if return_tensors == "pt":
__snake_case : int = torch.from_numpy(UpperCAmelCase_ )
# boxes batch size of 1 by default
__snake_case : int = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__snake_case : Tuple = tf.convert_to_tensor(UpperCAmelCase_ )
# boxes batch size of 1 by default
__snake_case : Optional[int] = tf.expand_dims(UpperCAmelCase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__snake_case : Optional[Any] = torch.from_numpy(UpperCAmelCase_ )
# point batch size of 1 by default
__snake_case : List[str] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__snake_case : Tuple = tf.convert_to_tensor(UpperCAmelCase_ )
# point batch size of 1 by default
__snake_case : Union[str, Any] = tf.expand_dims(UpperCAmelCase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__snake_case : Optional[int] = torch.from_numpy(UpperCAmelCase_ )
# point batch size of 1 by default
__snake_case : Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__snake_case : Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase_ )
# point batch size of 1 by default
__snake_case : Optional[int] = tf.expand_dims(UpperCAmelCase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__snake_case : int = max([point.shape[0] for point in input_points] )
__snake_case : Optional[int] = []
for i, point in enumerate(UpperCAmelCase_ ):
if point.shape[0] != expected_nb_points:
__snake_case : int = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__snake_case : Dict = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCAmelCase_ )
__snake_case : Union[str, Any] = processed_input_points
return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """Rescale coordinates from the original image frame into the resized frame."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        """Check and preprocess the 2D points, labels and bounding boxes into lists of numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
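# --- Added illustration (not part of the original processor): a minimal sketch of the
# rescaling `_normalize_coordinates` performs. Points in the original image frame are
# scaled per axis by new_size / old_size; the helper name and the sizes below are
# assumed example values, not the library's API.
if __name__ == "__main__":
    import numpy as np

    def _rescale_points(coords, old_size, new_size):  # hypothetical demo helper
        old_h, old_w = old_size
        new_h, new_w = new_size
        coords = np.asarray(coords, dtype=float).copy()
        coords[..., 0] *= new_w / old_w  # x axis
        coords[..., 1] *= new_h / old_h  # y axis
        return coords

    # a point at (x=500, y=375) in a 750x1000 (H, W) image resized to 768x1024
    print(_rescale_points([[500.0, 375.0]], (750, 1000), (768, 1024)))  # -> [[512. 384.]]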
| 326 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
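# --- Added sanity check (an illustration): both implementations should agree on the
# classic 3-4-5 right triangle.
if __name__ == "__main__":
    assert abs(euclidean_distance([0, 0], [3, 4]) - 5.0) < 1e-12
    assert abs(euclidean_distance_no_np([0, 0], [3, 4]) - 5.0) < 1e-12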
| 162 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
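# --- Added illustration (not part of the conversion script): how the "*" wildcard in
# MAPPING is resolved. The fairseq key below is a representative example; the layer
# index is recovered from the text preceding the matched key.
if __name__ == "__main__":
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key = "self_attn.k_proj"
    mapped_key = "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    print(mapped_key.replace("*", layer_index))  # -> encoder.layers.3.attention.k_proj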
| 10 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRContextEncoder tokenizer, identical to `BertTokenizer`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRQuestionEncoder tokenizer, identical to `BertTokenizer`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
'''simple docstring'''
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model, ordered by passage relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best answer spans for one passage, greedily by score and skipping overlaps."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""Construct a DPRReader tokenizer with custom span-decoding utilities."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
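# --- Added illustration (not part of the tokenizer): the span-scoring rule used by
# `_get_best_spans` in miniature. Every candidate span is scored with
# start_logits[start] + end_logits[end]; spans are then taken greedily by score while
# skipping overlaps. The logits below are made-up values.
if __name__ == "__main__":
    start_logits = [0.1, 2.0, 0.3]
    end_logits = [0.2, 0.1, 1.5]
    max_answer_length = 2
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    print(scores[0])  # -> ((1, 2), 3.5): the best span covers tokens 1..2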
| 204 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
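# --- Added usage example (an illustration): the classic two-state "Healthy/Fever"
# HMM, with the textbook probabilities assumed below, decoded with `viterbi`.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['Healthy', 'Healthy', 'Fever']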
| 10 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder for image captioning: a GPT-2 language model driven by an (optionally
    bottlenecked) prefix of CLIP-like features.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate captions given CLIP-like features, one beam-search call per feature."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Length-normalized beam search over the GPT-2 decoder."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
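# --- Added note (illustration only, not part of the model): `generate_beam` ranks
# beams by length-normalized score, i.e. cumulative log-probability divided by the
# number of generated tokens, so longer beams are not penalized merely for length.
# The tensors below are made-up values.
if __name__ == "__main__":
    scores_sum = torch.tensor([[-2.0, -3.0], [-2.5, -2.2]])  # cumulative log-probs per candidate
    seq_lengths = torch.tensor([2.0, 4.0])  # tokens generated so far, per beam
    scores_sum_average = scores_sum / seq_lengths[:, None]
    print(scores_sum_average.view(-1).topk(2, -1))  # the two best length-normalized candidates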
| 48 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
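# --- Added illustration (not part of the config): `inputs_to_logits_ratio` is just the
# product of the conv strides, so with the default strides (5, 2, 2, 2, 2, 2, 2) the
# feature encoder emits one frame per 5 * 2**6 = 320 input samples.
if __name__ == "__main__":
    print(functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1))  # -> 320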
| 10 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
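# --- Added sanity check (an illustration): F(12) = 144 is the first Fibonacci number
# with three digits, so fibonacci_digits_index(3) should return 12.
if __name__ == "__main__":
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(3) == 12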
| 159 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a, precision: float = 10**-10) -> float:
    """Finds the root of `func` by the Newton-Raphson method, starting from point `a`."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 10 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would fail to import transformers dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 5 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Generate an endless sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f'{solution() = }')
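# --- Added spot-checks (an illustration): the 6th prime is 13, and the well-known
# Project Euler #7 answer for the 10001st prime is 104743.
if __name__ == "__main__":
    assert solution(6) == 13
    assert solution() == 104743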
| 10 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 114 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 30})
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
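
# A minimal usage sketch (added for illustration, not part of the original tests).
# The constructor kwargs mirror the tester defaults above; treat the exact resize
# behaviour (scale shortest edge by 1/crop_pct, then center crop) as an assumption.
def _example_poolformer_preprocessing():
    image_processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
    dummy = Image.fromarray(np.zeros((40, 50, 3), dtype=np.uint8))
    pixel_values = image_processor(dummy, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 30, 30)  # batch, channels, crop height, crop width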
| 10 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        # map pixel values from [0, 1] to [-1, 1]
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two arrays or tensors v0 and v1."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
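
# A short illustration (added, not in the original file): slerp keeps the interpolant
# on the arc between the endpoints, unlike plain lerp, which cuts through the chord.
def _example_slerp():
    v0 = torch.tensor([1.0, 0.0])
    v1 = torch.tensor([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    # halfway along the quarter circle: both components equal sin(pi/4) ~= 0.7071
    assert torch.allclose(mid, torch.tensor([0.7071, 0.7071]), atol=1e-3)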
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
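
# Worked check (added, illustrative): for unit vectors at 90 degrees the chord length
# is sqrt(2), so the loss is 2 * arcsin(sqrt(2)/2)^2 = 2 * (pi/4)^2 ~= 1.2337.
def _example_spherical_dist_loss():
    x = torch.tensor([[1.0, 0.0]])
    y = torch.tensor([[0.0, 1.0]])
    loss = spherical_dist_loss(x, y)
    assert torch.allclose(loss, torch.tensor([1.2337]), atol=1e-3)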
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        clip_model,
        tokenizer,
        unet,
        scheduler,
        feature_extractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image,
        content_image,
        style_prompt=None,
        content_prompt=None,
        height=512,
        width=512,
        noise_strength=0.6,
        num_inference_steps=50,
        guidance_scale=7.5,
        batch_size=1,
        eta=0.0,
        clip_guidance_scale=100,
        generator=None,
        output_type="pil",
        return_dict=True,
        slerp_latent_style_strength=0.8,
        slerp_prompt_style_strength=0.1,
        slerp_clip_image_style_strength=0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
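
# A minimal usage sketch (added for illustration; the model ids, the custom-pipeline
# name and the CoCa-free prompt arguments are assumptions, not part of the original file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#       clip_model=CLIPModel.from_pretrained("openai/clip-vit-base-patch32"),
#       feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32"),
#   )
#   out = pipe(
#       style_image=style_pil,
#       content_image=content_pil,
#       style_prompt="an oil painting",      # passing prompts avoids the CoCa requirement
#       content_prompt="a mountain lake",
#       num_inference_steps=50,
#   )
#   out.images[0].save("mixed.png")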
| 279 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
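
# Illustration (added; the token ids are hypothetical): for a pair of sequences the
# special-token layout is [CLS] A [SEP] B [SEP], with token type ids 0 over the first
# segment and 1 over the second.
def _example_pair_layout(tokenizer):
    ids_a, ids_b = [7, 8], [9]
    assert tokenizer.build_inputs_with_special_tokens(ids_a, ids_b) == (
        [tokenizer.cls_token_id, 7, 8, tokenizer.sep_token_id, 9, tokenizer.sep_token_id]
    )
    assert tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b) == [0, 0, 0, 0, 1, 1]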
| 10 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
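
# Quick illustration (added, not part of the original file): CANINE operates on Unicode
# code points directly, so the defaults trade a subword vocabulary for hash-based
# character embeddings and a long position range.
def _example_canine_config():
    config = CanineConfig()
    assert config.max_position_embeddings == 16384  # characters, not subwords
    assert config.num_hash_functions == 8 and config.num_hash_buckets == 16384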
| 311 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
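
# Illustration (added; the token values are hypothetical): shift_tokens_right prepends
# the decoder start token and drops the final position, which is what
# prepare_config_and_inputs below relies on to build decoder_input_ids.
def _example_shift_tokens_right():
    ids = np.array([[5, 6, 2]], dtype=np.int64)
    shifted = shift_tokens_right(ids, 1, 2)  # pad_token_id=1, decoder_start_token_id=2
    assert shifted.tolist() == [[2, 5, 6]]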
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 10 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
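
# Illustration (added; the key is a made-up ParlAI-style name): the PATTERNS table plus
# the encoder/decoder norm renames map ParlAI weight names onto the HF Blenderbot layout.
def _example_rename():
    assert (
        rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
        == "encoder.layers.0.self_attn.q_proj.weight"
    )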
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
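
# Example invocation (illustrative; the script name and the paths are placeholders):
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json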
| 336 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
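
# Illustration (added; the token ids are hypothetical): XLMProphetNet appends [SEP]
# rather than wrapping with [CLS] ... [SEP], and token type ids are all zeros even for
# sequence pairs, which use the A [SEP] [SEP] B [SEP] layout.
def _example_special_token_layout(tokenizer):
    ids_a, ids_b = [11, 12], [13]
    assert tokenizer.build_inputs_with_special_tokens(ids_a) == [11, 12, tokenizer.sep_token_id]
    assert tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b) == [0, 0, 0, 0, 0, 0]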
| 10 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Dict ):
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Tuple , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Any ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] , *lowerCamelCase_ : int , **lowerCamelCase_ : int ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[Any] , *lowerCamelCase_ : Any , **lowerCamelCase_ : int ):
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Dict , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Tuple , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *lowerCamelCase_ : Dict , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Dict , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : str , *lowerCamelCase_ : int , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Dict ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : Dict , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase_ ( cls : str , *lowerCamelCase_ : str , **lowerCamelCase_ : int ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 323 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
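# How the lazy pattern behaves (a simplified sketch of _LazyModule; the names
# below are illustrative): importing the package is cheap, and the first
# attribute access does roughly
#   module = importlib.import_module(".modeling_ibert", __name__)
#   value = getattr(module, "IBertModel")
# then caches the result on the package module so later lookups are free.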
| 10 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed

MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes over the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it.")
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
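# Example invocation (paths and hyper-parameters are illustrative, adapted from
# the distillation project's documentation rather than taken from this file):
# python train.py --student_type distilbert \
#     --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_first_training \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle --force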
| 326 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Stores two signals and performs their circular convolution."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
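# Hand-checked example for the default signals above: circularly convolving
# [2, 1, 2, -1] with [1, 2, 3, 4] gives
# CircularConvolution().circular_convolution()  # -> [10, 10, 6, 14]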
| 162 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 10 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
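# Example CLI use via python-fire (file names are illustrative):
# python rouge_cli.py predictions.txt references.txt --save_path rouge.json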
| 204 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A configuration replicating `BertConfig` with additional pruning/masking parameters."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 10 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True if `num1` and `num2` have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
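# Hand-checked examples: the XOR of two ints is negative exactly when their
# sign bits differ, so the check needs no branching.
# different_signs(1, -1)   # -> True
# different_signs(1, 1)    # -> False
# different_signs(-7, -3)  # -> False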
| 48 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
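# Usage sketch:
# q = CircularQueue(3)
# q.enqueue(10).enqueue(20)
# len(q)       # -> 2
# q.first()    # -> 10
# q.dequeue()  # -> 10; the front index then advances modulo the capacity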
| 10 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
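# Example invocation (file names are illustrative, not from the original repo):
# python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
#     --config_file yoso_config.json --pytorch_dump_path ./yoso-converted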
| 159 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
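# Example invocation (the checkpoint path is illustrative):
# python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#     --checkpoint_path yolos_s_200_pre.pth \
#     --pytorch_dump_folder_path ./yolos-small --push_to_hub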
| 10 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
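# Hand-checked example of the key renaming above: the regex rewrites each
# "<name>.<digits>" segment to use an underscore, so
# rename_key("encoder.layers.0.attention.weight")
# returns "encoder.layers_0.attention.weight".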
| 5 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Project Euler 173: count the square laminae that can be formed using up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(f'{solution() = }')
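# Sanity check from the Project Euler 173 statement: using up to one hundred
# tiles, exactly forty-one different square laminae can be formed, i.e.
# solution(100) == 41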
| 10 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
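# Usage sketch: the iterator protocol yields the sum of all node values.
# root = Node(10); root.left = Node(5); root.right = Node(-3)
# sum(BinaryTreeNodeSum(root))  # -> 12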
| 114 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 10 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setters
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
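# Typical wiring inside a model test class (sketch; BertConfig stands in for
# whichever config class the test targets):
# def setUp(self):
#     self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
# def test_config(self):
#     self.config_tester.run_common_tests()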
| 279 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
a : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ["DPTFeatureExtractor"]
a : int = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
lowerCamelCase__: Optional[int] ={
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
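# Minimal usage sketch ("google/owlvit-base-patch32" is a public zero-shot
# object detection checkpoint; the image URL is the COCO sample used elsewhere
# in this repo):
#
# from transformers import pipeline
#
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# predictions = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote"],
# )
# print(predictions[0])  # {'score': ..., 'label': 'cat', 'box': {'xmin': ..., ...}}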
| 10 | 0 |
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
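# Usage sketch (the .ckpt path is hypothetical; download the TF-Slim
# MobileNetV1 checkpoint separately):
#
#   python convert_mobilenet_v1_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf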
| 336 |
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
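# Minimal sketch of the batched-denoising API exercised above (shapes are
# illustrative; a real UNet output replaces the random "noise prediction"):
#
# import torch
# from diffusers import DDPMParallelScheduler
#
# scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
# samples = torch.randn(3, 4, 8, 8)            # three noisy samples
# timesteps = torch.tensor([999, 998, 997])    # one timestep per sample
# noise_pred = torch.randn_like(samples)       # stands in for the model output
# prev_samples = scheduler.batch_step_no_noise(noise_pred, timesteps, samples)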
| 10 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
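# Minimal usage sketch (public transformers API; builds a small randomly
# initialized model, no checkpoint download needed):
#
# from transformers import DebertaV2Config, DebertaV2Model
#
# config = DebertaV2Config(num_hidden_layers=2, hidden_size=128, num_attention_heads=4, intermediate_size=256)
# model = DebertaV2Model(config)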
| 323 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 10 | 0 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
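# Minimal usage sketch (standalone; calling the processor dispatches to
# preprocess above):
#
# import numpy as np
# from PIL import Image
#
# processor = CLIPImageProcessor()
# image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype("uint8"))
# batch = processor(images=image, return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 224, 224)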
| 326 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
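# Minimal usage sketch against a released checkpoint ("vinai/bartpho-syllable"
# is the public BARTpho model on the Hub; requires network access):
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))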
| 10 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of token-id lists; it can only be initialized with integers
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # one constraint may not be a complete subset of another
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
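# Minimal generate() sketch using the constraint tested above (model name and
# target words are illustrative; constrained beam search accepts a
# `constraints` list):
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# from transformers.generation import DisjunctiveConstraint
#
# tokenizer = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# word_ids = [tokenizer("Regen", add_special_tokens=False).input_ids,
#             tokenizer("regnerisch", add_special_tokens=False).input_ids]
# inputs = tokenizer("translate English to German: It is raining.", return_tensors="pt")
# out = model.generate(**inputs, constraints=[DisjunctiveConstraint(word_ids)], num_beams=4)
# print(tokenizer.decode(out[0], skip_special_tokens=True))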
| 162 |
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
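# Usage sketch (paths are hypothetical; `fairseq` must be installed and the
# dict is the fairseq json dictionary shipped with the fine-tuned checkpoint):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech_large_finetuned.pt \
#       --dict_path ./dict.json \
#       --pytorch_dump_folder_path ./unispeech-hf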
| 10 | 0 |
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        init_latents = image
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
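# Minimal usage sketch ("google/ddpm-church-256" is a public 256x256 DDPM
# checkpoint, matching the 256x256 resize in `trans` above; the input path is
# hypothetical):
#
# from diffusers import DDPMPipeline
# from PIL import Image
#
# base = DDPMPipeline.from_pretrained("google/ddpm-church-256")
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=base.unet, scheduler=base.scheduler)
# result = pipe(image=Image.open("church.png"), strength=0.5, num_inference_steps=50)
# result.images[0].save("renoised.png")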
| 204 |
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )

            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
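# Usage sketch (the classic healthy/fever HMM from the Viterbi literature;
# probability values are illustrative):
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['Healthy', 'Healthy', 'Fever']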
| 10 | 0 |