| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
"""simple docstring"""
def __init__( self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : int=99 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : Union[str, Any]=5 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Optional[Any]=4 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=512 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : Union[str, Any]=4 , __UpperCAmelCase : List[str]=None , ):
a : str = parent
a : Union[str, Any] = batch_size
a : Optional[Any] = seq_length
a : int = is_training
a : int = use_input_mask
a : Optional[Any] = use_token_type_ids
a : Any = use_labels
a : Optional[int] = vocab_size
a : Optional[int] = hidden_size
a : Any = num_hidden_layers
a : str = num_attention_heads
a : int = intermediate_multiple_size
a : Any = hidden_act
a : Union[str, Any] = hidden_dropout
a : int = attention_dropout
a : str = weight_tying
a : Optional[int] = max_position_embeddings
a : Optional[int] = type_vocab_size
a : int = type_sequence_label_size
a : Optional[int] = initializer_range
a : str = num_labels
a : List[Any] = num_choices
a : Any = scope
def __snake_case ( self : List[str]):
a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Union[str, Any] = None
if self.use_input_mask:
a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Union[str, Any] = None
if self.use_labels:
a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __snake_case ( self : int):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __snake_case ( self : Union[str, Any]):
a , a , a , a : Tuple = self.prepare_config_and_inputs()
a : str = True
return config, input_ids, input_mask, token_labels
def __snake_case ( self : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]):
a : str = GPTNeoXJapaneseModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a : Any = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : str):
a : Optional[Any] = True
a : Dict = GPTNeoXJapaneseModel(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int]):
a : List[Any] = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __snake_case ( self : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any]):
a : List[Any] = True
a : Any = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
# first forward pass
a : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase)
a : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
a : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
a : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and attention_mask
a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
a : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1)
a : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase)
a : List[Any] = output_from_no_past["hidden_states"][0]
a : Tuple = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a : List[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
a : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
a : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3))
def __snake_case ( self : Optional[int]):
a : Tuple = self.prepare_config_and_inputs()
a , a , a , a : Any = config_and_inputs
a : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase : int = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCAmelCase : List[Any] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCAmelCase : List[Any] = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCAmelCase : str = False
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Dict = False
def __snake_case ( self : Any):
a : Optional[int] = GPTNeoXJapaneseModelTester(self)
a : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def __snake_case ( self : List[str]):
self.config_tester.run_common_tests()
def __snake_case ( self : Tuple):
a , a , a , a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Dict):
a , a , a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Optional[int]):
# This regression test was failing with PyTorch < 1.3
a , a , a , a : int = self.model_tester.prepare_config_and_inputs_for_decoder()
a : Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : str):
a , a , a , a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Optional[int]):
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def __snake_case ( self : List[str]):
a : Optional[int] = "abeja/gpt-neox-japanese-2.7b"
a : int = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
a : Union[str, Any] = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
a : Tuple = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase)
a : List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase)
a : int = []
for prompt in prompts:
a : List[str] = tokenizer(__UpperCAmelCase , return_tensors="pt").input_ids
a : List[str] = model.generate(__UpperCAmelCase , max_length=50)
a : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
| 40 |
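The model tester above builds its dummy inputs with two helpers from the shared test utilities. A minimal sketch of what they do, hedged because the real implementations in tests/test_modeling_common.py differ in detail:

import torch

def ids_tensor(shape, vocab_size):
    # random token ids in [0, vocab_size), used as dummy input_ids above
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    # random 0/1 mask; the real helper also guarantees at least one
    # attended token per row (here we force the last position)
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, -1] = 1
    return mask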
"""simple docstring"""
def is_isogram(string: str) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 40 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 275 |
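A quick usage sketch of the attribute_map above (values are illustrative): generic code can read the canonical names while the config stores GPT-2-style ones, because PretrainedConfig resolves mapped attributes on access.

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
# attribute_map aliases resolve on attribute access:
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12
assert config.num_attention_heads == config.n_head == 12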
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase :Tuple = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : int = ["""pixel_values"""]
def __init__( self : Any , _A : bool = True , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[Any] , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 256}
__magic_name__ : str = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[str] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__magic_name__ : Optional[int] = get_size_dict(_A )
__magic_name__ : Union[str, Any] = do_resize
__magic_name__ : List[Any] = size
__magic_name__ : List[str] = resample
__magic_name__ : Dict = do_center_crop
__magic_name__ : List[str] = crop_size
__magic_name__ : int = do_rescale
__magic_name__ : Tuple = rescale_factor
__magic_name__ : List[str] = do_normalize
__magic_name__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
__magic_name__ : Optional[Any] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__magic_name__ : Dict = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ) -> np.ndarray:
__magic_name__ : int = get_size_dict(_A )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : np.ndarray , _A : float , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple ) -> np.ndarray:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : ImageInput , _A : Optional[bool] = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_A : List[Any] , ) -> List[str]:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Tuple = size if size is not None else self.size
__magic_name__ : Optional[Any] = get_size_dict(_A , default_to_square=_A )
__magic_name__ : Dict = resample if resample is not None else self.resample
__magic_name__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__ : Dict = crop_size if crop_size is not None else self.crop_size
__magic_name__ : List[str] = get_size_dict(_A )
__magic_name__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : Any = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : Tuple = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Union[str, Any] = image_std if image_std is not None else self.image_std
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__magic_name__ : List[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
__magic_name__ : Union[str, Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
__magic_name__ : Union[str, Any] = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
__magic_name__ : List[Any] = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__magic_name__ : Optional[Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__magic_name__ : Union[str, Any] = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : List[str] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
| 275 | 1 |
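The processor above applies resize, center crop, rescale, and normalize in that order. A self-contained numpy sketch of the same pipeline (illustrative; the real class also resizes the shortest edge to 256 first and handles channel-format bookkeeping):

import numpy as np

MEAN = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_MEAN
STD = np.array([0.5, 0.5, 0.5])   # IMAGENET_STANDARD_STD

def toy_preprocess(image: np.ndarray) -> np.ndarray:
    # image: HWC uint8, already resized so both sides are >= 224
    h, w, _ = image.shape
    top, left = (h - 224) // 2, (w - 224) // 2
    crop = image[top : top + 224, left : left + 224, :]  # center crop
    rescaled = crop.astype(np.float32) * (1 / 255)       # rescale
    normalized = (rescaled - MEAN) / STD                 # normalize
    return normalized.transpose(2, 0, 1)                 # HWC -> CHW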
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """simple docstring"""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """simple docstring"""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """simple docstring"""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """simple docstring"""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        """simple docstring"""
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 184 |
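One detail worth calling out in the plot above: the contour is drawn at probability 0.5 because the sigmoid maps a linear score of exactly 0 to 0.5, so the 0.5 contour is precisely the decision boundary theta . x = 0.

assert sigmoid_function(0.0) == 0.5  # score 0 <-> probability 0.5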
class _lowercase :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = size
lowerCamelCase__ : List[str] = [0] * size
lowerCamelCase__ : str = [0] * size
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return (index & (index + 1)) - 1
def lowerCAmelCase ( self : int , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = value
while index < self.size:
lowerCamelCase__ : Tuple = self.get_prev(__lowerCamelCase ) + 1
if current_left_border == index:
lowerCamelCase__ : Optional[Any] = value
else:
lowerCamelCase__ : str = max(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Dict = self.get_next(__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
right -= 1 # because `right` is exclusive
lowerCamelCase__ : str = 0
while left <= right:
lowerCamelCase__ : Optional[Any] = self.get_prev(__lowerCamelCase )
if left <= current_left:
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , self.tree[right] )
lowerCamelCase__ : Any = current_left
else:
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 | 1 |
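The two static helpers above are the standard bit tricks of a 0-indexed Fenwick tree, and the update/query bodies call them as get_next/get_prev. A quick numeric check of that reading (illustrative):

def get_next(index: int) -> int:
    # next node whose stored range covers `index`
    return index | (index + 1)

def get_prev(index: int) -> int:
    # one below the left border of the range ending at `index`
    return (index & (index + 1)) - 1

assert get_next(5) == 7    # 0b101 -> 0b111
assert get_prev(7) == -1   # tree[7] covers the whole prefix [0, 7]
assert get_prev(5) == 3    # tree[5] covers indices 4..5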
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'naver-clova-ix/donut-base-finetuned-docvqa'
_lowerCAmelCase = (
'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
_lowerCAmelCase = 'document_qa'
_lowerCAmelCase = AutoProcessor
_lowerCAmelCase = VisionEncoderDecoderModel
_lowerCAmelCase = ['image', 'text']
_lowerCAmelCase = ['text']
def __init__( self : Optional[int] , *_lowerCamelCase : Any , **_lowerCamelCase : Dict ):
"""simple docstring"""
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self : str , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
"""simple docstring"""
A_ : Tuple = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
A_ : List[str] = task_prompt.replace('''{user_input}''' , _lowerCamelCase )
A_ : str = self.pre_processor.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors='''pt''' ).input_ids
A_ : Optional[Any] = self.pre_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _a ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_lowerCamelCase , ).sequences
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.pre_processor.batch_decode(_lowerCamelCase )[0]
A_ : int = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
A_ : Tuple = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
A_ : str = re.sub(R'''<.*?>''' , '''''' , _lowerCamelCase , count=1 ).strip() # remove first task start token
A_ : Any = self.pre_processor.token2json(_lowerCamelCase )
return sequence["answer"]
| 4 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = "utf-8"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = True # deprecated
_lowerCAmelCase = None # deprecated
_lowerCAmelCase = 1_0 << 2_0 # 10MB
_lowerCAmelCase = None
class UpperCamelCase_ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCAmelCase = JsonConfig
def _a ( self : int ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
A_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A_ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : Union[str, Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = [files]
A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = [files]
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def _a ( self : int , _lowerCamelCase : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def _a ( self : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : int = json.load(_lowerCamelCase )
# We keep only the field we are interested in
A_ : List[str] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
A_ : int = set().union(*[row.keys() for row in dataset] )
A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
A_ : Tuple = dataset
A_ : Dict = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
A_ : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : int = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A_ : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
A_ : List[Any] = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
A_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
A_ : int = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
| 4 | 1 |
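The chunked JSON-Lines loop above hinges on one trick: read a fixed-size chunk, then extend it to the next newline so no record is split across chunks. A minimal sketch of that pattern (hedged; the real loader additionally retries pyarrow parsing with a doubled block_size when an object still straddles a block boundary):

import io

def iter_complete_chunks(f: io.BufferedReader, chunksize: int = 10 << 20):
    while True:
        batch = f.read(chunksize)
        if not batch:
            break
        batch += f.readline()  # finish the current line
        yield batch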
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
lowerCamelCase : Optional[Any] =LEDConfig
lowerCamelCase : Any ={}
lowerCamelCase : Optional[Any] ='gelu'
def __init__( self : Union[str, Any] , a : List[Any] , a : str=13 , a : Optional[Any]=7 , a : Any=True , a : Optional[Any]=False , a : Optional[Any]=99 , a : Union[str, Any]=32 , a : Tuple=2 , a : Any=4 , a : Any=37 , a : Any=0.1 , a : Tuple=0.1 , a : int=20 , a : List[str]=2 , a : Dict=1 , a : Tuple=0 , a : str=4 , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__lowerCamelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__lowerCamelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__lowerCamelCase = prepare_led_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCamelCase = tf.concat(
[tf.zeros_like(_SCREAMING_SNAKE_CASE )[:, :-1], tf.ones_like(_SCREAMING_SNAKE_CASE )[:, -1:]] , axis=-1 , )
__lowerCamelCase = global_attention_mask
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Optional[Any] , a : int ):
"""simple docstring"""
__lowerCamelCase = TFLEDModel(config=_SCREAMING_SNAKE_CASE ).get_decoder()
__lowerCamelCase = inputs_dict["""input_ids"""]
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict["""attention_mask"""][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
__lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention_mask
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
__lowerCamelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase : Optional[Any] =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCamelCase : Dict =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase : int =(
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase : Any =True
lowerCamelCase : Tuple =False
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Dict =False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = TFLEDModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
__lowerCamelCase = 2
__lowerCamelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__lowerCamelCase = True
__lowerCamelCase = self.model_tester.seq_length
__lowerCamelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a : List[Any] ):
__lowerCamelCase = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a : List[str] ):
__lowerCamelCase = [t.numpy() for t in outputs.encoder_attentions]
__lowerCamelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = model_class(_SCREAMING_SNAKE_CASE )
__lowerCamelCase = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCamelCase = model_class(_SCREAMING_SNAKE_CASE )
__lowerCamelCase = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCamelCase = True
__lowerCamelCase = model_class(_SCREAMING_SNAKE_CASE )
__lowerCamelCase = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(_SCREAMING_SNAKE_CASE )
__lowerCamelCase = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
__UpperCAmelCase =1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
__lowerCamelCase = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__lowerCamelCase = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__lowerCamelCase = prepare_led_inputs_dict(model.config , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCamelCase = model(**_SCREAMING_SNAKE_CASE )[0]
__lowerCamelCase = (1, 10_24, 7_68)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# change to expected output here
__lowerCamelCase = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
__lowerCamelCase = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__lowerCamelCase = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
__lowerCamelCase = prepare_led_inputs_dict(model.config , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCamelCase = model(**_SCREAMING_SNAKE_CASE )[0]
__lowerCamelCase = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# change to expected output here
__lowerCamelCase = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-3 , rtol=1e-3 )
| 67 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func"""):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 109 | 0 |
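The CLI above relies on the usual argparse subcommand pattern: each *_command_parser registers a subparser and stores its handler on args.func, which main() then dispatches to. A minimal self-contained sketch (names are illustrative):

from argparse import ArgumentParser

def demo_command_parser(subparsers):
    parser = subparsers.add_parser("env", help="print environment info")
    parser.set_defaults(func=lambda args: print("env info..."))
    return parser

parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
demo_command_parser(subparsers)
args = parser.parse_args(["env"])
args.func(args)  # prints "env info..."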
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_poolformer"] = ['''PoolFormerFeatureExtractor''']
_import_structure["image_processing_poolformer"] = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 253 |
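The init above follows the repository's lazy-import pattern: exported names are declared up front in _import_structure and only resolved when first accessed. An illustrative sketch of the idea (not the real _LazyModule):

import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # exported attribute -> submodule that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)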
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = "ZinengTang/tvlt-base"
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase_ : int ) -> List[str]:
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> str:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : List[str] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = np.ones([12_000] )
UpperCAmelCase_ : Dict = feature_extractor(lowerCAmelCase_ , return_tensors="np" )
UpperCAmelCase_ : List[Any] = processor(audio=lowerCAmelCase_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : Optional[int] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : str = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ )
UpperCAmelCase_ : Any = np.ones([3, 224, 224] )
UpperCAmelCase_ : Union[str, Any] = image_processor(lowerCAmelCase_ , return_tensors="np" )
UpperCAmelCase_ : List[str] = processor(images=lowerCAmelCase_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : str = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = np.ones([12_000] )
UpperCAmelCase_ : int = np.ones([3, 224, 224] )
UpperCAmelCase_ : Union[str, Any] = processor(audio=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Any = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 253 | 1 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
| 78 |
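Example of the helper above (repo and file names are hypothetical): both hub-library versions end up requesting the same URL-encoded path, since newer huggingface_hub releases quote it internally.

url = hf_hub_url("user/my-dataset", "data/train file.json", revision="main")
# -> https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train%20file.json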
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with a squared absolute
        # value a*a + b*b greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 78 | 1 |
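Two quick checks of the escape-time helper above: the origin never diverges (normalized distance 1.0), while a point far outside the set escapes on the first step.

assert get_distance(0, 0, 50) == 1.0   # never escapes: step 49 / 49
assert get_distance(1, 1, 50) == 0.0   # |z| > 2 after one iteration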
"""simple docstring"""
from jiwer import compute_measures
import datasets
lowerCAmelCase__ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCAmelCase__ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowerCAmelCase__ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        """simple docstring"""
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
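# A worked example of the iterative branch above (a sketch; the counts assume
# jiwer's standard word alignment): for predictions ["this is the prediction",
# "there is an other sample"] against references ["this is the reference",
# "there is another one"], the first pair contributes S=1, D=0, I=0, hits=3 and
# the second pair S=2, D=0, I=1, hits=2, so incorrect / total = (1 + 3) / (4 + 4)
# = 0.5, which matches the docstring example.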
| 350 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence( text: str ) -> str:
    '''simple docstring'''
    text = re.sub("<n>" , "" , text )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text ) )
| 133 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip_2'''] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
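# A minimal sketch of the lazy-import idea used above (illustrative, not the
# actual _LazyModule implementation; `_class_to_module` is a hypothetical name):
#
#   class _LazySketch(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(submodule, name)
#
# so importing the package stays cheap until an attribute such as Blip2Model
# is first accessed, at which point only the needed submodule is loaded.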
| 87 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[Any] = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                """The config can simply be saved and uploaded again to be fixed.""" )
| 114 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 117 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_A = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
_A = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
_A = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Any:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
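# Token-type sketch for create_token_type_ids_from_sequences above: with a
# sequence pair A (2 tokens) and B (3 tokens), the mask is
# [0] * len([CLS] + A + [SEP]) + [1] * len(B + [SEP]) == [0, 0, 0, 0, 1, 1, 1, 1].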
| 117 | 1 |
from math import pow, sqrt
# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# The _1/_2 subscripts below restore parameter names that had collapsed together.
def validate( *values ):
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_1 , molar_mass_2 ):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('''Input Error: Molar mass values must greater than 0.''' )
    )
def first_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must greater than 0.''' )
    )
def second_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must greater than 0.''' )
    )
def first_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must greater than 0.''' )
    )
def second_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must greater than 0.''' )
    )
| 275 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig ):
    model_type = """trajectory_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ) ->int:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
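# Usage sketch: TrajectoryTransformerConfig() reproduces the
# halfcheetah-medium-v2 defaults above; via attribute_map,
# TrajectoryTransformerConfig().num_hidden_layers == 4 should read n_layer.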
| 275 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
def a ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def a ( self : Optional[int] ) -> int:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def a ( self : int ) -> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = True
return config, input_ids, input_mask, token_labels
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
lowerCAmelCase__ = GPTNeoXModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]:
lowerCAmelCase__ = True
lowerCAmelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
lowerCAmelCase__ = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = GPTNeoXForQuestionAnswering(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = GPTNeoXForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = GPTNeoXForTokenClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ) -> str:
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
def a ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ) -> Dict:
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8 )
def a ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
def a ( self : List[Any] ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> int:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase__ = None
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Dict:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def a ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def a ( self : Dict ) -> str:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def a ( self : str ) -> str:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE__ )
original_model.to(SCREAMING_SNAKE_CASE__ )
original_model.eval()
lowerCAmelCase__ = original_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
lowerCAmelCase__ = original_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ = {"type": scaling_type, "factor": 10.0}
lowerCAmelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE__ )
scaled_model.to(SCREAMING_SNAKE_CASE__ )
scaled_model.eval()
lowerCAmelCase__ = scaled_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
lowerCAmelCase__ = scaled_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
def a ( self : str ) -> List[Any]:
lowerCAmelCase__ = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
lowerCAmelCase__ = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCAmelCase__ = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
lowerCAmelCase__ = model.generate(**SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , max_new_tokens=20 )
lowerCAmelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 350 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module ):
    """simple docstring"""

    def __init__( self ):
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook(ModelHook ):
    """simple docstring"""

    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook ):
    """simple docstring"""

    def post_forward( self , module , output ):
        return output + 1
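# Mechanics assumed from accelerate's hook API (a sketch, not the actual code):
# add_hook_to_module replaces module.forward with a wrapper roughly like
#
#   def new_forward(*args, **kwargs):
#       args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
#       output = old_forward(*args, **kwargs)
#       return module._hf_hook.post_forward(module, output)
#
# which is why PreForwardHook shifts the input and PostForwardHook the output.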
class HooksModelTest ( unittest.TestCase ):
"""simple docstring"""
def a ( self : List[str] ) -> Tuple:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE__ )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
def a ( self : Union[str, Any] ) -> int:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , append=SCREAMING_SNAKE_CASE__ )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE__ )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , "_old_forward" ) )
def a ( self : List[str] ) -> Any:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(x + 1 )
lowerCAmelCase__ = test_model(x + 2 )
lowerCAmelCase__ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCAmelCase__ = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 )
def a ( self : Any ) -> Union[str, Any]:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , output + 2 , atol=1e-5 )
def a ( self : Optional[int] ) -> int:
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCAmelCase__ = True
lowerCAmelCase__ = test_model(SCREAMING_SNAKE_CASE__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(SCREAMING_SNAKE_CASE__ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = torch.randn(2 , 3 ).to(0 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , torch.device(0 ) )
def a ( self : List[str] ) -> List[str]:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
        hook_kwargs = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , offload_buffers=SCREAMING_SNAKE_CASE__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a ( self : Optional[Any] ) -> str:
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
| 221 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool ):
    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['''image''', '''text''']
    outputs = ['''text''']
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )
    def encode( self , document: "Image" , question: str ):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4 | 1 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume between the supported units, going through cubic meters."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
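    # Example usage (sketch): 4 cubic meters expressed in litres.
    print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0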
| 362 |
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed sqrt(3RT/M) in m/s.

    `temperature` is in kelvin and `molar_mass` in kg/mol.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # kelvin
    molar_mass = 0.028  # kg/mol for nitrogen (N2); the original 28 was in g/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
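    # With these inputs the result is ~517 m/s, the textbook v_rms for N2 at 300 K.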
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300 | 0 |
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(npv, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
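    # Example usage (sketch): outlay of 100 now, then three inflows of 50,
    # discounted at 10% per period.
    print(present_value(0.10, [-100, 50, 50, 50]))  # 24.34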
| 321 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
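    # Sanity check (sketch): 25 is 0b11001, so both implementations return 3.
    assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
    assert get_set_bits_count_using_modulo_operator(25) == 3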
benchmark()
| 321 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
# ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
# pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
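# Minimal usage sketch (names as restored above): the default config's feature
# encoder downsamples by the product of its conv strides, 5*2*2*2*2*2*2 = 320.
# config = UniSpeechConfig()
# assert config.inputs_to_logits_ratio == 320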
| 366 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of `FlaxLogitsProcessor`/`FlaxLogitsWarper` objects, applied in order."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Warps the score distribution with `scores / temperature`."""

    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Keeps the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k, filter_value=-float("Inf"), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
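# Usage sketch (names as restored above): warpers compose through the processor list,
# e.g. temperature scaling followed by top-k filtering at generation step `cur_len`.
# processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
# scores = processors(input_ids, scores, cur_len)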
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the first generated token."""

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the last token when `max_length` is reached."""

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses EOS until a minimum length has been generated."""

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Forces specific tokens at specific generation indices, as given by `force_token_map`."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0], lambda: scores, lambda: lax.cond(
                self.force_token_array[cur_len] >= 0, lambda: _force_token(cur_len), lambda: scores, ), )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Constrains Whisper generation so that timestamp tokens appear in valid pairs."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf")), scores_k.at[: self.eos_token_id].set(-float("inf")), ), scores_k, )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf")), scores_k, )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 150 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 194 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 199 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=1, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1_024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 299 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sort `collection` in place, in ascending order."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble collection[index] rightward until the adjacent pair is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
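# Quick sanity check (sketch): the sort works in place.
_demo = [5, 3, 1, 4, 2]
rec_insertion_sort(_demo, len(_demo))
assert _demo == [1, 2, 3, 4, 5]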
if __name__ == "__main__":
UpperCamelCase__ = input('Enter integers separated by spaces: ')
UpperCamelCase__ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 65 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123 | 0 |
'''simple docstring'''
g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on an object: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
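    # Example usage (sketch): buoyant force on 0.5 m^3 fully submerged in
    # fresh water (~997 kg/m^3).
    print(archimedes_principle(fluid_density=997, volume=0.5))  # ~4888.62 N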
| 352 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "dpt"

    def __init__(self, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=3_84, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 1_92, 3_84, 7_68], fusion_hidden_size=2_56, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=2_55, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 10_24, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs, ):
"""simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("""Initializing the config with a `BiT` backbone.""")
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("""Initializing the config with a `BiT` backbone.""")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
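# Minimal usage sketch (names as restored above): the default config is not hybrid,
# so the serialized backbone config stays None.
# config = DPTConfig()
# assert config.to_dict()["backbone_config"] is None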
| 61 | 0 |
'''simple docstring'''
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 208 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra Dense(5) head for answer-category classification."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('f4')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
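# Note (sketch): the three heads are averaged with equal weight; the pooled head
# scores the five Natural Questions answer categories (e.g. null/long/short/yes/no)
# produced by the extra Dense(5) layer in the module above.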
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 1_0500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 2_0000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_file: str = "data/nq-training.jsonl"
    val_data_file: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'])
        batch = {
            'input_ids': jnp.array(input_ids, dtype=jnp.int32),
            'attention_mask': jnp.array(attention_mask, dtype=jnp.int32),
            'start_labels': jnp.array(features['start_token'], dtype=jnp.int32),
            'end_labels': jnp.array(features['end_token'], dtype=jnp.int32),
            'pooled_labels': jnp.array(features['category'], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name='batch')
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop('start_labels')
        end_labels = model_inputs.pop('end_labels')
        pooled_labels = model_inputs.pop('pooled_labels')
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    grads = jax.lax.pmean(grads, 'batch')
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name='batch')
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop('start_labels')
    end_labels = model_inputs.pop('end_labels')
    pooled_labels = model_inputs.pop('pooled_labels')
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=F'Running EPOCH-{epoch}'):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['loss'])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}', state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='Evaluating ... '):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['loss'])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(F'SAVING CHECKPOINT IN {save_dir}', end=' ... ')
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, 'opt_state.msgpack'), 'wb') as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, 'args.joblib'))
        joblib.dump(self.data_collator, os.path.join(save_dir, 'data_collator.joblib'))
        with open(os.path.join(save_dir, 'training_state.json'), 'w') as f:
            json.dump({'step': state.step.item()}, f)
        print('DONE')
def restore_checkpoint(save_dir, state):
    print(f'RESTORING CHECKPOINT FROM {save_dir}', end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))

    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']

    print('DONE')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
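# Usage sketch: linear warmup from `init_lr` to `lr` over `warmup_steps`, then
# linear decay towards ~0 for the remaining steps, e.g.
# lr_fn = scheduler_fn(3e-5, 0.0, 100, 1000); lr_fn(50) -> ~1.5e-5 (mid-warmup)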
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 220 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
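
# Illustrative sketch (added; not part of the metric itself): the same computation
# done directly with `rouge_score`, mirroring the aggregated path of `_compute`.
# The example strings are placeholders.
def _example_rouge_usage():
    scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
    aggregator = scoring.BootstrapAggregator()
    for ref, pred in zip(["hello there"], ["hello there"]):
        aggregator.add_scores(scorer.score(ref, pred))
    return aggregator.aggregate()  # {"rouge1": AggregateScore(low=..., mid=..., high=...), ...}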
| 361 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    '''Separate-chaining hash table: each occupied slot holds a deque of values.'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # create the chain lazily, then push the new value onto its front
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
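
# Illustrative, self-contained sketch (added) of the separate-chaining idea the
# class above implements. The table size of 5 is an assumption for the demo, and
# this does not go through the parent HashTable API.
def _chaining_demo():
    table = [None] * 5
    for value in (10, 15, 20):  # 10 % 5 == 15 % 5 == 20 % 5 == 0, so all collide
        index = value % 5
        table[index] = deque([]) if table[index] is None else table[index]
        table[index].appendleft(value)
    return table[0]  # deque([20, 15, 10]): the newest value sits at the front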
| 38 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
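
# Illustrative sketch (added): how the helper above brackets a peak-memory check
# in the tests. `run_pipeline` is a hypothetical stand-in for a pipeline call.
def _measure_peak_memory(run_pipeline):
    _start_torch_memory_measurement()
    run_pipeline()
    return torch.cuda.max_memory_allocated()  # peak bytes since the reset above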
| 13 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
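
# Illustrative sketch (added): the metadata layout prepare_metadata builds, shown
# on a hand-written class_info dict instead of the downloaded JSON file.
def _example_metadata():
    class_info = {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}}
    metadata = {k: v["name"] for k, v in class_info.items()}
    metadata["thing_ids"] = [int(k) for k, v in class_info.items() if v["isthing"]]
    metadata["class_names"] = [v["name"] for v in class_info.values()]
    return metadata  # {"0": "wall", "1": "person", "thing_ids": [1], "class_names": ["wall", "person"]}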
class OneFormerImageProcessorTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True)
        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
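
    # Illustrative sketch (added): a minimal RLE in the format the assertions
    # above expect -- alternating (1-based run start, run length) pairs over the
    # row-major flattened mask. It mirrors, but is not, the library's
    # binary_mask_to_rle implementation.
    @staticmethod
    def _rle_sketch(mask):
        pixels = np.concatenate([[0], mask.flatten(), [0]])
        runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
        runs[1::2] -= runs[::2]  # convert (start, stop) boundaries into (start, length)
        return list(runs)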
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo")
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 300 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Invert `encode`: map 1..26 back to 'a'..'z'."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
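
# Illustrative check (added): `encode` maps "a".."z" onto 1..26 and `decode`
# inverts it, so the two functions round-trip any lowercase string.
def _roundtrip_demo() -> None:
    assert encode("abc") == [1, 2, 3]
    assert decode([8, 9]) == "hi"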
| 204 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=10_00,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -1_00
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 204 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=10_24,
        num_mel_bins=1_28,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
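
# Illustrative usage (added; not part of the original file): the config
# instantiates with the defaults set in __init__ above.
def _default_ast_config():
    config = ASTConfig()
    assert config.num_mel_bins == 1_28 and config.max_length == 10_24
    return config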
| 21 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
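# A minimal sketch (not part of the original script) of why the slicing above works:
# PyTorch's nn.MultiheadAttention stores the query/key/value projections as one packed
# in_proj matrix of shape (3 * embed_dim, embed_dim), stacked in q, k, v order. For the
# 256-dim attention used here, rows [0:256] are q, [256:512] are k, and [512:768] are v.
def _demo_in_proj_split(embed_dim=256):
    # illustrative only; the helper name and synthetic tensor are assumptions for the demo
    packed = torch.arange(3 * embed_dim * embed_dim, dtype=torch.float32).reshape(3 * embed_dim, embed_dim)
    q, k, v = packed[:embed_dim, :], packed[embed_dim : 2 * embed_dim, :], packed[-embed_dim:, :]
    assert q.shape == k.shape == v.shape == (embed_dim, embed_dim)
    return q, k, v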
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = """resnet101"""
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = """panoptic""" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = """huggingface/label-files"""
        filename = """coco-detection-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = """coco_panoptic""" if is_panoptic else """coco_detection"""
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    conditional_detr = torch.hub.load("""DeppMeng/ConditionalDETR""" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = """conditional_detr.""" + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = """conditional_detr.model.""" if is_panoptic else """model."""
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("""conditional_detr""" )
                and not key.startswith("""class_labels_classifier""" )
                and not key.startswith("""bbox_predictor""" )
            ):
                val = state_dict.pop(key )
                state_dict["""conditional_detr.model""" + key[len("""conditional_detr""" ) :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["""conditional_detr.""" + key] = val
            elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
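# A hedged usage sketch (not from the original script): once the conversion above has run,
# the dumped folder should load with the regular transformers API. The local path below is
# an illustrative assumption.
#
#     from transformers import ConditionalDetrForObjectDetection, ConditionalDetrImageProcessor
#     processor = ConditionalDetrImageProcessor.from_pretrained("./converted_conditional_detr")
#     model = ConditionalDetrForObjectDetection.from_pretrained("./converted_conditional_detr")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     outputs = model(**inputs)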
| 343 | 0 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    '''debug''': logging.DEBUG,
    '''info''': logging.INFO,
    '''warning''': logging.WARNING,
    '''error''': logging.ERROR,
    '''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    '''simple docstring'''
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name() -> str:
    '''simple docstring'''
    return __name__.split("." )[0]
def _get_library_root_logger() -> logging.Logger:
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    '''simple docstring'''
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    '''simple docstring'''
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    '''simple docstring'''
    return log_levels
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity() -> int:
    '''simple docstring'''
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning():
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug():
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error():
    '''simple docstring'''
    return set_verbosity(ERROR )
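# A short usage sketch (illustrative, not part of the module): downstream code typically
# grabs a module-scoped logger and tunes the global verbosity through the setters above.
#
#     import transformers.utils.logging as hf_logging
#     hf_logging.set_verbosity_info()            # or export TRANSFORMERS_VERBOSITY=info
#     logger = hf_logging.get_logger(__name__)
#     logger.info("verbosity is now %s", hf_logging.get_verbosity())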
def disable_default_handler() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler(handler: logging.Handler ) -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler(handler: logging.Handler ) -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    '''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def reset_format() -> None:
    '''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice( self , *args , **kwargs ):
    '''simple docstring'''
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once( self , *args , **kwargs ):
    '''simple docstring'''
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
    def __iter__( self ):
        '''simple docstring'''
        return iter(self._iterator )
    def __getattr__( self , _ ):
        '''simple docstring'''
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        '''simple docstring'''
        return self
    def __exit__( self , type_ , value , traceback ):
        '''simple docstring'''
        return
class _tqdm_cls:
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars() -> None:
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 367 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = 'mobilenet_v1'
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
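# A brief illustrative sketch (the helper name and channel count are assumptions):
# depth_multiplier scales every layer's channel count, floored at min_depth, which is
# how the 0.75/0.5 MobileNet variants are derived from the 1.0 base model.
def _mobilenet_config_example():
    config = MobileNetV1Config(depth_multiplier=0.75)
    base_channels = 64
    scaled = max(config.min_depth, int(base_channels * config.depth_multiplier))
    return scaled  # 48 channels for a layer that has 64 at multiplier 1.0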
class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-4
| 340 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['polics', 'health'] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , examples ):
        """simple docstring"""
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # No kwarg
        outputs = classifier('Who are you voting for in 2020?' , ['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
        outputs = classifier(
            'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier('' , candidate_labels='politics' )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='politics' )
        with self.assertRaises(ValueError ):
            classifier('Who are you voting for in 2020?' , candidate_labels='' )
        with self.assertRaises(TypeError ):
            classifier('Who are you voting for in 2020?' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
        with self.assertRaises(AttributeError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier: Pipeline ):
        """simple docstring"""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
    @require_torch
    def test_truncation( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
    @require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            } , )
    @require_tf
    def test_small_model_tf( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            } , )
    @slow
    @require_torch
    def test_large_model_pt( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
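# A hedged sketch (not part of the test file) of what the pipeline does under the hood:
# each candidate label is substituted into the hypothesis template and scored against the
# premise with an NLI model; the entailment probabilities become the label scores.
def _build_nli_pairs(sequence, candidate_labels, hypothesis_template="This example is {}."):
    # illustrative helper; names are assumptions, not the pipeline's internal API
    return [(sequence, hypothesis_template.format(label)) for label in candidate_labels]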
| 253 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCAmelCase : Any = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
    def __init__( self , label_idx=-1 ):
        """simple docstring"""
        self.label_idx = label_idx
    def read_examples_from_file( self , data_dir , mode: Union[Split, str] ):
        """simple docstring"""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        """simple docstring"""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
    def get_labels( self , path: str ):
        """simple docstring"""
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    def __init__( self ):
        """simple docstring"""
        super().__init__(label_idx=-2 )
    def get_labels( self , path: str ):
        """simple docstring"""
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
    def read_examples_from_file( self , data_dir , mode: Union[Split, str] ):
        """simple docstring"""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        """simple docstring"""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
            out += "\n"
            writer.write(out )
            example_id += 1
    def get_labels( self , path: str ):
        """simple docstring"""
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
        else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
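# A small illustrative sketch (tiny in-memory sample, not the real CoNLL files) of the
# whitespace-separated, blank-line-delimited format the NER reader above expects:
def _demo_conll_parse():
    sample = "EU B-ORG\nrejects O\nGerman B-MISC\n\nPeter B-PER\n"
    sentences, words = [], []
    for line in sample.splitlines():
        if not line.strip():
            if words:
                sentences.append(words)
                words = []
        else:
            words.append(tuple(line.split(' ')))
    if words:
        sentences.append(words)
    return sentences  # [[('EU', 'B-ORG'), ('rejects', 'O'), ('German', 'B-MISC')], [('Peter', 'B-PER')]]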
| 253 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
snake_case__ : Optional[int] = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 352 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
snake_case__ : Optional[int] = '''1'''
snake_case__ : str = '''0'''
snake_case__ : List[str] = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
for _ in range(max_iters):
    outputs = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
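# A possible extension (illustrative, not in the original script): recording per-iteration
# latencies makes tail behaviour visible, since TensorRT engine (re)builds can skew a mean.
# The `feed_dict` name below is an assumption standing in for the input mapping above.
#
#     latencies = []
#     for _ in range(max_iters):
#         t0 = time.time()
#         sess.run(None, feed_dict, run_options=run_opt)
#         latencies.append((time.time() - t0) * 1000)
#     print("p50/p95:", np.percentile(latencies, 50), np.percentile(latencies, 95))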
| 274 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ = False
class snake_case ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_image_variations( self ) -> None:
        """simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 317 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive( TaskTemplate ):
    task: str = field(default="""question-answering-extractive""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features(
        {
            """answers""": Sequence(
                {
                    """text""": Value("""string""" ),
                    """answer_start""": Value("""int32""" ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
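# A brief illustrative sketch (column names are assumptions): the template maps user
# columns onto the canonical question/context/answers schema via column_mapping.
def _demo_column_mapping():
    template = QuestionAnsweringExtractive(question_column="q", context_column="passage", answers_column="gold")
    return template.column_mapping  # {'q': 'question', 'passage': 'context', 'gold': 'answers'}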
| 317 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
    def construct_mapping( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme(readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path: Path ) -> "DatasetMetadata":
        with open(path , encoding="utf-8" ) as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path: Path ):
        if path.exists():
            with open(path , encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , "w" , encoding="utf-8" ) as readme_file:
            readme_file.write(updated_readme_content )
    def _to_readme( self , readme_content: Optional[str] = None ) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string( cls , string: str ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="utf-8" , ).decode("utf-8" )
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
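# A minimal illustrative round-trip (file contents are assumptions for the demo): a README
# whose YAML front matter sits between the two '---' markers is parsed into DatasetMetadata
# and can be re-serialized without touching the prose below it.
def _demo_metadata_roundtrip():
    readme = "---\nlicense: mit\ntask_categories:\n- text-classification\n---\n# My dataset\n"
    yaml_block, body = _split_yaml_from_readme(readme)
    metadata = DatasetMetadata.from_yaml_string(yaml_block)
    return metadata, body  # ({'license': 'mit', 'task_categories': [...]}, '# My dataset')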
| 333 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1 , 0 , -1):
        swapped = False
        for j in range(i , 0 , -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
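# A quick illustrative check (not in the original file): the bidirectional passes should
# agree with Python's built-in sort on random input.
def _demo_cocktail_shaker_check():
    import random
    data = [random.randint(-100, 100) for _ in range(50)]
    assert cocktail_shaker_sort(list(data)) == sorted(data)
    return True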
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer( self ):
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ):
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens = tokenizer(**self.metas )['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 273 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__A : Union[str, Any] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
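# A short note with an illustrative sketch (the usage below is an assumption, not part of
# this file): the sys.modules swap above means submodules are only imported on first
# attribute access, keeping the top-level package import cheap.
#
#     from transformers.models.speech_to_text import Speech2TextConfig  # triggers the real import
#     config = Speech2TextConfig()                                       # now fully materialized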
| 273 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure( config ):
"""simple docstring"""
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption( parser ):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    """simple docstring"""
    # pytest exits with code 5 when no tests are collected; treat that as success
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output( self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
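# Hedged illustration (mine, not part of conftest.py): a doctest marked with the
# IGNORE_RESULT flag registered above -- CustomOutputChecker accepts it no matter
# what the expected output line says.
def _demo(a, b):
    """
    >>> _demo(40, 2)  # doctest: +IGNORE_RESULT
    'this expected value is never compared'
    """
    return a + b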
| 281 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
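# Hedged sketch (assumes `diffusers` is installed): both imports resolve to the same
# class, but the first one goes through this shim and emits the warning configured above.
from diffusers.pipeline_utils import DiffusionPipeline as OldPath   # deprecated, warns
from diffusers.pipelines.pipeline_utils import DiffusionPipeline    # preferred path
assert OldPath is DiffusionPipeline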
| 281 | 1 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : List[Any] =get_logger(__name__)
class _PatchedModuleObj :
    def __init__( self , module , attrs=None ):
        """simple docstring"""
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("""__""" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        """simple docstring"""
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(""".""" )[0]
        self.original = {}
        self.attrs = attrs or []
def __enter__( self ):
"""simple docstring"""
        *submodules , target_attr = self.target.split(""".""" )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(""".""".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
lowercase = getattr(import_module(""".""".join(__lowerCAmelCase ) ) , __lowerCAmelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __lowerCAmelCase ) is attr_value:
lowercase = getattr(self.obj , __lowerCAmelCase )
setattr(self.obj , __lowerCAmelCase , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
lowercase = globals()["""__builtins__"""][target_attr]
setattr(self.obj , __lowerCAmelCase , self.new )
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self , *__lowerCAmelCase ):
"""simple docstring"""
for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
    def stop( self ):
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
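# Hedged usage sketch (the module and function names below are invented for
# illustration, and it assumes the repaired patch_submodule above): rebind
# os.path.join as seen from one module only, restoring the original on exit.
import os
import types

caller = types.ModuleType("caller")
caller.os = os  # the global that patch_submodule locates and wraps

def fake_join(*parts):
    return "|".join(parts)

with patch_submodule(caller, "os.path.join", fake_join):
    assert caller.os.path.join("a", "b") == "a|b"
assert caller.os.path.join("a", "b") == os.path.join("a", "b")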
| 197 | """simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ) -> np.ndarray:
    '''Apply the ReLU activation element-wise: max(0, x).'''
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 197 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size(self ):
        """simple docstring"""
        return len(self.raw_vocab )
    def get_vocab(self ):
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize(self , text ):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id(self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token(self , index ):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string(self , tokens ):
        """simple docstring"""
        out_string = ''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids(self , conversation ):
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    '''simple docstring'''
    def __init__(self , vocab , ids_to_tokens , emoji ):
        """simple docstring"""
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__(self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text(self , content ):
        """simple docstring"""
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    def tokenize(self , text , clean=False ):
        """simple docstring"""
        text = text.replace(' ' , '<SP>' )
        text = text.replace('　' , '<SP>' )
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
def UpperCamelCase__ (self , __a , __a="\n" ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__a ) > 0:
words.append(bytearray(__a ).decode('utf-8' , errors='replace' ) )
UpperCAmelCase__ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(__a )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(__a )
if len(__a ) > 0:
words.append(bytearray(__a ).decode('utf-8' , errors='replace' ) )
UpperCAmelCase__ = ''.join(__a )
return text
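# Hedged usage sketch (assumes `transformers` is installed and the Hub checkpoint is
# reachable): a round trip through the subword + emoji vocabulary defined above.
from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("こんにちは、世界。")["input_ids"]
print(tokenizer.decode(ids))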
| 335 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
    '''simple docstring'''
    def __init__(self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        requires_backends(self , 'vision' )
        requires_backends(self , 'torch' )
        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch." )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['points_per_batch'] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['points_per_crop'] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['crops_n_layers'] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['crop_overlap_ratio'] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['crop_n_points_downscale_factor'] = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params['pred_iou_thresh'] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params['stability_score_offset'] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params['mask_threshold'] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params['stability_score_thresh'] = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['crops_nms_thresh'] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['output_rle_mask'] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['output_bboxes_mask'] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        """simple docstring"""
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess(self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        """simple docstring"""
        image = load_image(image )
        target_size = self.image_processor.size['longest_edge']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors='pt' )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
                'To return all points at once, set points_per_batch to None' )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        """simple docstring"""
        input_boxes = model_inputs.pop('input_boxes' )
        is_last = model_inputs.pop('is_last' )
        original_sizes = model_inputs.pop('original_sizes' ).tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes' ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['iou_scores']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores' ) )
            all_masks.extend(model_output.pop('masks' ) )
            all_boxes.append(model_output.pop('boxes' ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 335 | 1 |
'''simple docstring'''
def solution(power: int = 1000 ):
    '''Return the sum of the decimal digits of 2**power.'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
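# Hedged self-check (my own example, not from the source): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26; the loop above is equivalent to this one-liner.
assert solution(15) == 26 == sum(int(digit) for digit in str(2**15))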
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
_SCREAMING_SNAKE_CASE = solution(power)
print("Sum of the digits is: ", result)
| 158 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__( self , keywords: list[str] ) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state: int , char: str ) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword: str ) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in( self , string: str ) -> dict[str, list[int]]:
        result: dict = {} # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
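    # Hedged worked example (indices computed by hand): in "ahishers", "his" starts
    # at 1, "she" at 3, "he" at 4 and "hers" at 4.
    auto = Automaton(["he", "she", "his", "hers"])
    assert auto.search_in("ahishers") == {"his": [1], "she": [3], "he": [4], "hers": [4]}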
| 158 | 1 |
'''simple docstring'''
from __future__ import annotations
def ohms_law( voltage: float , current: float , resistance: float ) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
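    # Hedged examples (mine): exactly one of the three quantities must be passed as 0.
    assert ohms_law(voltage=10, current=5, resistance=0) == {"resistance": 2.0}
    assert ohms_law(voltage=0, current=1.5, resistance=2) == {"voltage": 3.0}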
| 358 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : int = is_training
__UpperCAmelCase : Union[str, Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : int = scope
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[Any] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> List[str]:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_config()
__UpperCAmelCase : List[Any] = 300
return config
def __A ( self ) -> Dict:
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__UpperCAmelCase : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
__UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_choices
__UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Dict = ()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = MraModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __A ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def __A ( self ) -> Any:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""MRA does not output attentions""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
__UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0]
__UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
__UpperCAmelCase : int = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
__UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__UpperCAmelCase : int = model(__UpperCAmelCase )[0]
__UpperCAmelCase : Union[str, Any] = 50_265
__UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__UpperCAmelCase : int = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
__UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__UpperCAmelCase : Any = model(__UpperCAmelCase )[0]
__UpperCAmelCase : Dict = 50_265
__UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__UpperCAmelCase : str = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 16 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class XLMRobertaTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase = None , **__lowerCamelCase , ):
'''simple docstring'''
__A : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
__A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
__A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
__A : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__A : str = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__A : Dict = 1
__A : Tuple = len(self.sp_model ) + self.fairseq_offset
__A : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
__A : Any = self.__dict__.copy()
__A : Optional[Any] = None
__A : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __lowerCamelCase ):
'''simple docstring'''
__A : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__A : Dict = {}
__A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Union[str, Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__A : Union[str, Any] = self.sp_model.PieceToId(__lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
        out_string = ''''''.join(__lowerCamelCase ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : str = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
__A : Dict = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
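# Hedged illustration (assumes `transformers` + `sentencepiece` and Hub access): the
# fairseq_offset above shifts every SentencePiece id by 1 so that ids 0-3 match the
# fairseq table in the comment; special tokens bypass SentencePiece entirely.
from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
print(tok.convert_tokens_to_ids("<s>"))   # 0, straight from fairseq_tokens_to_ids
print(tok.convert_tokens_to_ids("▁de"))  # SentencePiece id + fairseq_offset (1)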
| 179 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = 1
__A : Any = 3
__A : List[str] = (32, 32)
__A : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Any = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : int = self.dummy_cond_unet_upscale
__A : Union[str, Any] = DDPMScheduler()
__A : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
__A : int = self.dummy_vae
__A : int = self.dummy_text_encoder
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A : Any = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Dict = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[str] = '''A painting of a squirrel eating a burger'''
__A : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : List[str] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
__A : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : str = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__lowerCamelCase , )[0]
__A : Tuple = image[0, -3:, -3:, -1]
__A : int = image_from_tuple[0, -3:, -3:, -1]
__A : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__A : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.dummy_cond_unet_upscale
__A : List[str] = DDPMScheduler()
__A : str = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[int] = self.dummy_vae
__A : Optional[Any] = self.dummy_text_encoder
__A : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A : int = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Any = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Any = '''A painting of a squirrel eating a burger'''
__A : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
__A : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Any = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_cond_unet_upscale
__A : int = DDPMScheduler()
__A : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[Any] = self.dummy_vae
__A : List[str] = self.dummy_text_encoder
__A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A : int = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__A : Union[str, Any] = unet.half()
__A : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__A : Optional[int] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__A : Optional[Any] = torch.manual_seed(0 )
__A : Tuple = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' , ).images
__A : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__A : str = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Union[str, Any] = '''a cat sitting on a park bench'''
__A : Union[str, Any] = torch.manual_seed(0 )
__A : Optional[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__A : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.float16 , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Dict = '''a cat sitting on a park bench'''
__A : Any = torch.manual_seed(0 )
__A : Optional[int] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase__( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Dict = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.float16 , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__A : Tuple = '''a cat sitting on a park bench'''
__A : Tuple = torch.manual_seed(0 )
__A : List[str] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type='''np''' , )
__A : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
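# Illustrative sketch (added; not part of the original test suite): the minimal
# end-to-end usage pattern the integration tests above exercise. The model id
# and image URL are the ones used in the tests; everything else is standard
# diffusers API.
def _example_upscale_usage():
    import torch
    from diffusers import StableDiffusionUpscalePipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe = pipe.to("cuda")
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    # noise_level controls how much noise is added to the low-res conditioning image
    upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res, noise_level=20).images[0]
    return upscaled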
| 179 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
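# Illustrative note (added; not part of the original file): with the
# `_LazyModule` pattern above, importing the package is cheap; the real
# `tokenization_bartpho` module (and its sentencepiece dependency check)
# is only loaded on first attribute access, e.g.:
#
#   from transformers.models import bartpho
#   tok_cls = bartpho.BartphoTokenizer  # tokenization_bartpho is imported here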
| 238 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
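# Illustrative sketch (added; not part of the original tests): why "lower"
# tokenizes to ["low", "er</w>"] with the toy files written in setUp. The
# merges file ranks "l o", "lo w" and "e r</w>", so BPE greedily merges:
#
#   l o w e r</w>  --(l o)-->  lo w e r</w>  --(lo w)-->  low e r</w>
#                  --(e r</w>)-->  low er</w>
#
# "er</w>" is not merged into "low" because "low er</w>" is not in the
# merges list, and the ids [14, 15, 20] are simply the positions of
# "low", "er</w>" and "<unk>" in the vocab list above.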
| 238 | 1 |
"""simple docstring"""
def longest_distance(graph):
    '''Prints the number of vertices on the longest path in a DAG (Kahn's algorithm).'''
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
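# Worked example (added for illustration): with the adjacency list above, the
# longest chain of vertices is 0 -> 2 -> 5 -> 6 -> 7 (equally 1 -> 2 -> 5 -> 6 -> 7),
# so with every vertex's distance initialised to 1 the script prints 5.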
| 72 |
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct by bumping duplicates upward."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            # drop the reverse duplicate of each undirected edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns a minimum spanning tree using Boruvka's algorithm.
        The edge weights are assumed to be distinct (see distinct_weight)."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                # drop the reverse duplicate of each undirected edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
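# Usage sketch (added for illustration): a 4-vertex cycle with one diagonal.
# The weights are already distinct, so distinct_weight() is not needed here.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[0, 1, 2, 3],
        edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4), (0, 2, 5)],
    )
    mst = Graph.boruvka_mst(g)
    print(mst)  # expected MST edges: (0, 1, 1), (1, 2, 2), (2, 3, 3)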
| 305 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future "
                "versions. The config can simply be saved and uploaded again to be fixed."
            )
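# Usage sketch (added for illustration): building a deliberately tiny config,
# e.g. for unit tests; any unset argument falls back to the defaults above.
if __name__ == "__main__":
    tiny = MvpConfig(
        vocab_size=99, d_model=16,
        encoder_layers=2, decoder_layers=2,
        encoder_attention_heads=2, decoder_attention_heads=2,
        encoder_ffn_dim=32, decoder_ffn_dim=32,
    )
    print(tiny.hidden_size)          # 16 -- resolved through attribute_map to d_model
    print(tiny.num_attention_heads)  # 2  -- resolved to encoder_attention_heads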
| 213 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=9_9 , __snake_case=6_4 , __snake_case=5 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=1_6 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
snake_case = vocab_size - 1
def a_ ( self ):
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = self.get_config()
return config, input_ids, input_mask, token_labels
def a_ ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.prepare_config_and_inputs()
snake_case = True
return config, input_ids, input_mask, token_labels
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = GPTNeoXModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = True
snake_case = GPTNeoXModel(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = GPTNeoXForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = GPTNeoXForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = GPTNeoXForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = self.num_labels
snake_case = GPTNeoXForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = True
snake_case = GPTNeoXForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
snake_case = model(__snake_case , attention_mask=__snake_case , use_cache=__snake_case )
snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case = model(__snake_case , attention_mask=__snake_case , output_hidden_states=__snake_case )
snake_case = output_from_no_past['''hidden_states'''][0]
snake_case = model(
__snake_case , attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['''hidden_states'''][0]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
def a_ ( self ):
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__magic_name__ = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def a_ ( self ):
snake_case = GPTNeoXModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=6_4 , num_attention_heads=8 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
# This regression test was failing with PyTorch < 1.3
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case = None
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__snake_case , __snake_case , __snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def a_ ( self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a_ ( self , __snake_case ):
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = ids_tensor([1, 1_0] , config.vocab_size )
snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case = GPTNeoXModel(__snake_case )
original_model.to(__snake_case )
original_model.eval()
snake_case = original_model(__snake_case ).last_hidden_state
snake_case = original_model(__snake_case ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case = {'''type''': scaling_type, '''factor''': 10.0}
snake_case = GPTNeoXModel(__snake_case )
scaled_model.to(__snake_case )
scaled_model.eval()
snake_case = scaled_model(__snake_case ).last_hidden_state
snake_case = scaled_model(__snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def a_ ( self ):
snake_case = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
snake_case = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__snake_case )
snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__snake_case )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
snake_case = model.generate(**__snake_case , do_sample=__snake_case , max_new_tokens=2_0 )
snake_case = tokenizer.batch_decode(__snake_case )[0]
self.assertEqual(__snake_case , __snake_case )
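# Illustrative sketch (added; not part of the original tests): the RoPE-scaling
# cases parameterized above correspond to configuring the model like this. The
# config field name (`rope_scaling`) and its dict shape follow the test body;
# all other config values are left at their defaults.
def _example_rope_scaling_config(scaling_type="linear"):
    config = GPTNeoXConfig(rope_scaling={"type": scaling_type, "factor": 10.0})
    # "linear" interpolates positions at all lengths; "dynamic" only kicks in
    # once an input exceeds the original max_position_embeddings.
    return GPTNeoXModel(config)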
| 213 | 1 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Recursively computes base ** exponent for exponent >= 0."""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
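# Quick check (added for illustration): power(2, 10) == 1024, and via the
# negative-exponent branch above, base=2 with exponent=-2 prints
# 1 / power(2, 2) == 0.25.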
| 57 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
return generator, ["Something to write", "Something else"]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = generator("Something there" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
__lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
__lowerCAmelCase = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
generator(4 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
__lowerCAmelCase = 3
__lowerCAmelCase = generator(
"Something there" , num_return_sequences=__a , num_beams=__a , )
__lowerCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(__a , __a )
__lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
__lowerCAmelCase = generator.model.config.eos_token_id
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
| 57 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer built on the `tokenizers` library,
    with NMT/NFKC/lowercase normalization and Metaspace/Digits/Punctuation
    pre-tokenization.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        # The UnigramTrainer does not expose the unk id directly, so patch it
        # into the serialized model after training.
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
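# Usage sketch (added for illustration): training on a tiny in-memory corpus.
# The corpus and vocab size below are made up for the example; very small
# corpora may yield fewer pieces than requested.
if __name__ == "__main__":
    corpus = ["this is a tiny corpus", "just enough text to fit a toy vocab"]
    tok = SentencePieceUnigramTokenizer()
    tok.train_from_iterator(corpus, vocab_size=60, show_progress=False)
    print(tok.encode("this is a test").tokens)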
| 36 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=4 , ) -> List[str]:
snake_case_ : Dict = parent
snake_case_ : List[Any] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : Tuple = is_training
snake_case_ : List[str] = use_attention_mask
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Optional[Any] = use_labels
snake_case_ : Tuple = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : Optional[Any] = type_vocab_size
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : List[Any] = num_choices
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : str = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : int = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : str = True
snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : List[str] = True
A : List[str] = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[str] = FlaxBertModelTester(self )
@slow
def _lowerCAmelCase ( self ) -> Dict:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
snake_case_ : int = FlaxBertModel.from_pretrained("bert-base-cased" )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
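# Illustrative sketch (added; not part of the original tests): the smallest
# possible forward pass, mirroring the slow test above.
def _example_flax_bert_forward():
    model = FlaxBertModel.from_pretrained("bert-base-cased")
    outputs = model(np.ones((1, 1)))
    return outputs.last_hidden_state  # shape (1, 1, 768) for bert-base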
| 36 | 1 |
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
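# Usage sketch (added for illustration): the classic 5x5 example; with
# 8-directional connectivity this grid contains 5 islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Graph(5, 5, grid).count_islands())  # 5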
| 116 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose number of contained rectangles is closest
    to target (Project Euler 85).
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
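# Worked check (added for illustration): an a x b grid contains
# T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle
# number; e.g. a 3 x 2 grid gives T(3) * T(2) = 6 * 3 = 18 rectangles.
# For the default target of 2,000,000 this solution evaluates to 2772,
# the published answer to Project Euler problem 85.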
| 116 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __A ( UpperCamelCase__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , '''num_encoder_blocks''' ) )
class __A :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=64 , __A=3 , __A=4 , __A=[2, 2, 2, 2] , __A=[8, 4, 2, 1] , __A=[16, 32, 64, 128] , __A=[1, 4, 8, 16] , __A=[1, 2, 4, 8] , __A=True , __A=True , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.02 , __A=3 , __A=None , ) -> Dict:
a =parent
a =batch_size
a =image_size
a =num_channels
a =num_encoder_blocks
a =sr_ratios
a =depths
a =hidden_sizes
a =downsampling_rates
a =num_attention_heads
a =is_training
a =use_labels
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =initializer_range
a =num_labels
a =scope
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a =None
if self.use_labels:
a =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Optional[int]:
a =SegformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a =model(__lowerCamelCase )
a =a =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> str:
a =self.num_labels
a =SegformerForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a =model(__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
a =model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Dict:
a =1
a =SegformerForSemanticSegmentation(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__lowerCamelCase )
a =model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.prepare_config_and_inputs()
a , a , a =config_and_inputs
a ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =SegformerModelTester(self )
a =SegformerConfigTester(self , config_class=__lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__lowerCamelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__lowerCamelCase )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
a =True
for model_class in self.all_model_classes:
a =True
a =False
a =True
a =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a =outputs.attentions
a =sum(self.model_tester.depths )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a =True
a =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a =outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first attentions (first block, first layer)
a =(self.model_tester.image_size // 4) ** 2
a =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a =(self.model_tester.image_size // 32) ** 2
a =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a =len(__lowerCamelCase )
# Check attention is always last and order is fine
a =True
a =True
a =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
a =outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first attentions (first block, first layer)
a =(self.model_tester.image_size // 4) ** 2
a =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
def check_hidden_states_output(__A , __A , __A ):
a =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a =outputs.hidden_states
a =self.model_tester.num_encoder_blocks
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a =True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
a , a =self.model_tester.prepare_config_and_inputs_for_common()
a =True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ):
continue
a =model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
a =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
a =model(**__lowerCamelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
pass
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =SegformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _A ( ):
"""simple docstring"""
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# only resize + normalize
a =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__lowerCamelCase , align=__lowerCamelCase , do_random_crop=__lowerCamelCase )
a =SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__lowerCamelCase )
a =prepare_img()
a =image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
a =encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
a =model(__lowerCamelCase )
a =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a =torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# only resize + normalize
a =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__lowerCamelCase , align=__lowerCamelCase , do_random_crop=__lowerCamelCase )
a =SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__lowerCamelCase )
a =prepare_img()
a =image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
a =encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
a =model(__lowerCamelCase )
a =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a =torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __lowerCamelCase , atol=1E-1 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# only resize + normalize
a =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__lowerCamelCase , align=__lowerCamelCase , do_random_crop=__lowerCamelCase )
a =SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__lowerCamelCase )
a =prepare_img()
a =image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
a =encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
a =model(__lowerCamelCase )
a =outputs.logits.detach().cpu()
a =image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase , target_sizes=[(500, 300)] )
a =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __lowerCamelCase )
a =image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase )
a =torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , __lowerCamelCase )
| 367 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 215 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __UpperCamelCase ( unittest.TestCase ):
lowercase : Optional[int] =MODEL_FOR_MASKED_LM_MAPPING
lowercase : Any =TF_MODEL_FOR_MASKED_LM_MAPPING
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''tf''' )
lowerCamelCase_ =unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38_015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25_506, '''token_str''': ''' accuser'''},
], )
lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-05,
'''token''': 38_015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-05,
'''token''': 25_506,
'''token_str''': ''' accuser''',
},
], )
lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2_941, '''token_str''': ''' Te'''},
], )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''pt''' )
lowerCamelCase_ =unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35_676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''},
], )
lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS'''},
], )
lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2_941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13_606, '''token_str''': ''' Clara'''},
], )
lowerCamelCase_ =unmasker('''My name is <mask> <mask>''', top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=6 ), [
[
{
'''score''': 2.2e-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-05,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-05, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
], )
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline('''fill-mask''', model='''hf-internal-testing/tiny-random-distilbert''', device=0, framework='''pt''' )
# convert model to fp16
pipe.model.half()
lowerCamelCase_ =pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''pt''' )
self.run_large_test(lowerCAmelCase )
@slow
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''tf''' )
self.run_large_test(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase ), [
{'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1_573, '''token_str''': ''' Chris'''},
], )
lowerCamelCase_ =unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase ), [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_5_1,
'''token''': 2_201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_1_4,
'''token''': 12_790,
'''token_str''': ''' Lyon''',
},
], )
lowerCamelCase_ =unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase ), [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2_941, '''token_str''': ''' Te'''},
], )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''pt''' )
lowerCamelCase_ =None
lowerCamelCase_ =None
self.run_pipeline_test(lowerCAmelCase, [] )
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''tf''' )
lowerCamelCase_ =None
lowerCamelCase_ =None
self.run_pipeline_test(lowerCAmelCase, [] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token (probably reformer or wav2vec2)''' )
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase )
lowerCamelCase_ =[
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =fill_masker.tokenizer
lowerCamelCase_ =fill_masker.model
lowerCamelCase_ =fill_masker(
f'''This is a {tokenizer.mask_token}''', )
self.assertEqual(
lowerCAmelCase, [
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
], )
lowerCamelCase_ =fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
lowerCAmelCase, [
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
], )
lowerCamelCase_ =fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
lowerCAmelCase, [
[
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
],
[
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
],
], )
with self.assertRaises(lowerCAmelCase ):
fill_masker([None] )
        # Inputs without a mask token are not supported
with self.assertRaises(lowerCAmelCase ):
fill_masker('''This is''' )
self.run_test_top_k(lowerCAmelCase, lowerCAmelCase )
self.run_test_targets(lowerCAmelCase, lowerCAmelCase )
self.run_test_top_k_targets(lowerCAmelCase, lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase, lowerCAmelCase )
self.fill_mask_with_multiple_masks(lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =tokenizer.get_vocab()
lowerCamelCase_ =sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase, targets=lowerCAmelCase )
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase, [
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
], )
lowerCamelCase_ ={vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs}, lowerCAmelCase )
lowerCamelCase_ =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs}, set(lowerCAmelCase ) )
# Call argument
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase )
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase, [
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
], )
lowerCamelCase_ ={vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs}, lowerCAmelCase )
lowerCamelCase_ =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs}, set(lowerCAmelCase ) )
# Score equivalence
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase )
lowerCamelCase_ =[top_mask['''token_str'''] for top_mask in outputs]
lowerCamelCase_ =[top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ) == set(lowerCAmelCase ):
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=lowerCAmelCase )
lowerCamelCase_ =[top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets=[''''''] )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', targets='''''' )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase, top_k=2 )
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase, [
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
], )
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase )
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=2 )
self.assertEqual(
lowerCAmelCase, [
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
], )
self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =tokenizer.get_vocab()
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase )
# top_k=2, ntargets=3
lowerCamelCase_ =sorted(vocab.keys() )[:3]
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=2, targets=lowerCAmelCase )
        # If we use the most probable targets and filter differently, we should still
        # have the same results
        lowerCamelCase_ =[el['''token_str'''] for el in sorted(lowerCAmelCase, key=lambda x : x["score"], reverse=lowerCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ).issubset(lowerCAmelCase ):
lowerCamelCase_ =fill_masker(f'''This is a {tokenizer.mask_token}''', top_k=3, targets=lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowerCAmelCase ), nested_simplify(lowerCAmelCase ) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase )
lowerCamelCase_ =tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCamelCase_ =sorted(vocab.keys() )[:3]
lowerCamelCase_ =[targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCamelCase_ =fill_masker(f'''My name is {tokenizer.mask_token}''', targets=lowerCAmelCase, top_k=10 )
        # The target list contains duplicates, so we can't output more
        # candidates than the number of unique targets
self.assertEqual(len(lowerCAmelCase ), 3 )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =FillMaskPipeline(model=lowerCAmelCase, tokenizer=lowerCAmelCase )
lowerCamelCase_ =fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''', top_k=2 )
self.assertEqual(
lowerCAmelCase, [
[
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
],
[
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
],
[
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
{'''sequence''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase ), '''token''': ANY(lowerCAmelCase ), '''token_str''': ANY(lowerCAmelCase )},
],
], )
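# A minimal usage sketch (not part of the test suite) of the fill-mask
# pipeline exercised above; it assumes the tiny hub checkpoint used by the
# tests is downloadable, and scores from a tiny random model are meaningless.
if __name__ == "__main__":
    unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    # Single mask: a list of {sequence, score, token, token_str} dicts.
    print(unmasker("My name is <mask>"))
    # `targets` restricts the candidates to the given vocabulary strings.
    print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"]))
    # Two masks in one input: one candidate list per mask position.
    print(unmasker("My name is <mask> <mask>"))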
| 75 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
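# An illustrative sketch (mine, not library code) of the fairseq/sentencepiece
# id alignment implemented above: the four fairseq specials are pinned to ids
# 0..3 and every raw sentencepiece id is shifted by the offset of 1.
if __name__ == "__main__":
    fairseq_specials = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1

    def spm_id_to_fairseq_id(spm_id: int) -> int:
        # sentencepiece reserves id 0 for <unk>; map it back to the fairseq unk id.
        return spm_id + fairseq_offset if spm_id else fairseq_specials["<unk>"]

    # The first "real" spm piece (id 3) lands on fairseq id 4, matching the
    # alignment table in the comments above.
    assert spm_id_to_fairseq_id(3) == 4
    assert spm_id_to_fairseq_id(0) == 3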
| 16 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = """▁"""
_SCREAMING_SNAKE_CASE : List[str] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_SCREAMING_SNAKE_CASE : List[str] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
_SCREAMING_SNAKE_CASE : str = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
_SCREAMING_SNAKE_CASE : Optional[Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __a ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
def __init__( self : Tuple , lowercase_ : Any , lowercase_ : Any="<s>" , lowercase_ : Tuple="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : int="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : int="<pad>" , lowercase_ : Optional[Any]="<mask>" , lowercase_ : str=None , lowercase_ : str=None , lowercase_ : List[str]=None , lowercase_ : Dict = None , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]=False , **lowercase_ : Optional[int] , ):
UpperCamelCase__ : Dict =AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
UpperCamelCase__ : List[Any] ={} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase__ : Union[str, Any] =legacy_behaviour
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Dict =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCamelCase__ : int ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ : List[Any] =1
UpperCamelCase__ : Optional[Any] =len(self.sp_model )
UpperCamelCase__ : Any ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
UpperCamelCase__ : str ={v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase__ : str =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase__ : List[str] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase__ : List[str] =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase__ : Any =src_lang if src_lang is not None else '''eng_Latn'''
UpperCamelCase__ : Optional[Any] =self.lang_code_to_id[self._src_lang]
UpperCamelCase__ : Optional[Any] =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
UpperCamelCase__ : Tuple =self.__dict__.copy()
UpperCamelCase__ : Tuple =None
UpperCamelCase__ : int =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , lowercase_ : Dict ):
UpperCamelCase__ : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ : Optional[int] ={}
UpperCamelCase__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _lowerCAmelCase ( self : str ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCAmelCase ( self : Tuple ):
return self._src_lang
@src_lang.setter
def _lowerCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] ):
UpperCamelCase__ : Optional[Any] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : str = None , lowercase_ : int = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] =[1] * len(self.prefix_tokens )
UpperCamelCase__ : int =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _lowerCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : Any = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : Any , lowercase_ : str = None ):
UpperCamelCase__ : Any =[self.sep_token_id]
UpperCamelCase__ : Tuple =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase__ : Union[str, Any] =src_lang
UpperCamelCase__ : List[str] =self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any =self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] =tgt_lang_id
return inputs
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Optional[int] ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self : Tuple , lowercase_ : str ):
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Tuple , lowercase_ : str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ : Dict =self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCAmelCase ( self : Dict , lowercase_ : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCAmelCase ( self : int , lowercase_ : Optional[int] ):
UpperCamelCase__ : Tuple =''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowerCAmelCase ( self : int , lowercase_ : List[str] , lowercase_ : List[str] = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ : Dict =os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase__ : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _lowerCAmelCase ( self : int , lowercase_ : str , lowercase_ : Dict = "eng_Latn" , lowercase_ : Optional[Any] = None , lowercase_ : Any = "fra_Latn" , **lowercase_ : Optional[Any] , ):
UpperCamelCase__ : int =src_lang
UpperCamelCase__ : int =tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Any ):
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCAmelCase ( self : Tuple ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCAmelCase ( self : int , lowercase_ : int ):
UpperCamelCase__ : int =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCamelCase__ : Tuple =[]
UpperCamelCase__ : List[Any] =[self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase__ : Dict =[self.cur_lang_code]
UpperCamelCase__ : Dict =[self.eos_token_id]
def _lowerCAmelCase ( self : List[str] , lowercase_ : Dict ):
UpperCamelCase__ : Union[str, Any] =self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCamelCase__ : List[Any] =[]
UpperCamelCase__ : List[Any] =[self.eos_token_id, self.cur_lang_code]
else:
UpperCamelCase__ : Tuple =[self.cur_lang_code]
UpperCamelCase__ : Any =[self.eos_token_id]
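# A hedged usage sketch of the language-code machinery above; the checkpoint
# id is the real NLLB hub model, but downloading it is assumed to be possible
# in your environment.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    enc = tok("Hello world", return_tensors="pt")
    # With the default (non-legacy) behaviour the source language code is
    # prefixed and </s> appended: [eng_Latn, ...pieces..., </s>].
    print(tok.convert_ids_to_tokens(enc["input_ids"][0]))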
| 352 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
@dataclass(frozen=snake_case__ )
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
@dataclass(frozen=snake_case__ )
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : Optional[int]=False , lowercase_ : bool = False , ):
UpperCamelCase__ : Tuple =hans_processors[task]()
UpperCamelCase__ : Union[str, Any] =os.path.join(
lowercase_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCamelCase__ : int =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =label_list[2], label_list[1]
UpperCamelCase__ : List[Any] =label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase__ : Any =cached_features_file + '''.lock'''
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCamelCase__ : Optional[int] =torch.load(lowercase_ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCamelCase__ : str =(
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('''Training examples: %s''' , len(lowercase_ ) )
UpperCamelCase__ : Tuple =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('''Saving features into cached file %s''' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ):
return len(self.features )
def __getitem__( self : Optional[int] , lowercase_ : Optional[Any] ):
return self.features[i]
def _lowerCAmelCase ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
def __init__( self : Any , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : Union[str, Any]=False , lowercase_ : bool = False , ):
UpperCamelCase__ : Any =hans_processors[task]()
UpperCamelCase__ : Tuple =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ : Tuple =label_list[2], label_list[1]
UpperCamelCase__ : Union[str, Any] =label_list
UpperCamelCase__ : Any =processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCamelCase__ : Union[str, Any] =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase__ : Optional[Any] =tf.data.Dataset.from_generator(
lowercase_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self : Optional[Any] ):
return self.dataset
def __len__( self : str ):
return len(self.features )
def __getitem__( self : List[str] , lowercase_ : Dict ):
return self.features[i]
def _lowerCAmelCase ( self : Dict ):
return self.label_list
class __a ( snake_case__ ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Union[str, Any] ):
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ):
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _lowerCAmelCase ( self : List[Any] ):
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
UpperCamelCase__ : Tuple =[]
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCamelCase__ : str ='''%s-%s''' % (set_type, line[0])
UpperCamelCase__ : str =line[5]
UpperCamelCase__ : Any =line[6]
UpperCamelCase__ : Optional[int] =line[7][2:] if line[7].startswith('''ex''' ) else line[7]
UpperCamelCase__ : str =line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def _lowerCAmelCase ( UpperCAmelCase : List[InputExample] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : PreTrainedTokenizer , ):
'''simple docstring'''
UpperCamelCase__ : List[str] ={label: i for i, label in enumerate(UpperCAmelCase )}
UpperCamelCase__ : int =[]
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
UpperCamelCase__ : str =tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , )
UpperCamelCase__ : str =label_map[example.label] if example.label in label_map else 0
UpperCamelCase__ : int =int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
_SCREAMING_SNAKE_CASE : List[str] = {
"""hans""": 3,
}
_SCREAMING_SNAKE_CASE : Tuple = {
"""hans""": HansProcessor,
}
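# A hedged driver sketch for the helpers above; `HansProcessor` and
# `hans_convert_examples_to_features` are the intended (unmangled) names of
# the objects defined in this file, and the data directory is a placeholder.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    processor = HansProcessor()
    # expects heuristics_evaluation_set.txt inside the directory
    examples = processor.get_dev_examples("/path/to/hans")
    features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)
    print(len(features), features[0].label, features[0].pairID)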
| 157 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __A ( lowerCAmelCase__ ) -> Dict:
raise NotImplementedError()
@abstractmethod
def __A ( self ) -> Optional[int]:
raise NotImplementedError()
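# A minimal sketch of a concrete command following the abstract interface
# above (names here are illustrative, not from the library): one static hook
# registers CLI arguments, one instance method executes the command.
class EchoCommand:  # would subclass the ABC above under its real name
    @staticmethod
    def register_subcommand(subparsers):
        # `subparsers` is the object returned by ArgumentParser.add_subparsers()
        sub = subparsers.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: EchoCommand(args.text).run())

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)

if __name__ == "__main__":
    parser = ArgumentParser()
    EchoCommand.register_subcommand(parser.add_subparsers())
    args = parser.parse_args()
    if hasattr(args, "func"):
        args.func(args)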
| 113 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : int = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowerCAmelCase__ : Union[str, Any] = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def UpperCamelCase__ ( A__ , A__ ) -> List[str]:
snake_case__ : Optional[Any] = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
snake_case__ : Dict = int(re.match(r'.*layer_(\d*).*' , A__ )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
def UpperCamelCase__ ( A__ ) -> str:
if dtype == torch.bool:
return 1 / 8
snake_case__ : List[str] = re.search(r'[^\d](\d+)$' , str(A__ ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
snake_case__ : Union[str, Any] = int(bit_search.groups()[0] )
return bit_size // 8
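# A standalone restatement of the byte-size rule implemented just above (the
# helper name `dtype_byte_size` is mine, not from the script): read the
# trailing bit width out of the dtype's repr and divide by 8, except
# torch.bool, which counts as 1/8 byte per element.
def dtype_byte_size(dtype) -> float:
    if dtype == torch.bool:
        return 1 / 8
    return int(re.search(r"[^\d](\d+)$", str(dtype)).groups()[0]) / 8

assert dtype_byte_size(torch.float16) == 2 and dtype_byte_size(torch.int64) == 8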
def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> List[str]:
# Construct model
if bloom_config_file == "":
snake_case__ : Union[str, Any] = BloomConfig()
else:
snake_case__ : int = BloomConfig.from_json_file(A__ )
if shard_model:
snake_case__ : Tuple = os.listdir(A__ )
        snake_case__ : str = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , A__ ) )
snake_case__ : str = {'weight_map': {}, 'metadata': {}}
snake_case__ : Optional[int] = 0
snake_case__ : Tuple = None
snake_case__ : Any = BloomConfig()
for j, file in enumerate(A__ ):
print('Processing file: {}'.format(A__ ) )
snake_case__ : str = None
for i in range(A__ ):
# load all TP files
snake_case__ : Optional[int] = file.replace('model_00' , F"""model_0{i}""" )
snake_case__ : int = torch.load(os.path.join(A__ , A__ ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case__ : List[Any] = list(temp.keys() )
for key in keys:
snake_case__ : List[Any] = temp.pop(A__ )
if tensors is None:
snake_case__ : Optional[Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
snake_case__ : Optional[int] = torch.cat([tensors[key], temp[key]] , dim=A__ )
            # Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case__ : Dict = tensors[key] / pretraining_tp
torch.save(
A__ , os.path.join(
A__ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(A__ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case__ : List[Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case__ : Optional[int] = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(A__ ) ).zfill(5 ) )
snake_case__ : Dict = BloomConfig()
snake_case__ : str = pytorch_dump_folder_path + '/' + CONFIG_NAME
snake_case__ : int = total_size
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A__ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
snake_case__ : List[str] = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '\n'
f.write(A__ )
else:
snake_case__ : int = BloomModel(A__ )
snake_case__ : Dict = os.listdir(A__ )
        snake_case__ : Union[str, Any] = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , A__ ) )
snake_case__ : List[str] = None
        for j, file in enumerate(A__ ):
snake_case__ : Dict = None
for i in range(A__ ):
# load all TP files
snake_case__ : List[Any] = file.replace('model_00' , F"""model_0{i}""" )
snake_case__ : int = torch.load(os.path.join(A__ , A__ ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case__ : List[str] = list(temp.keys() )
for key in keys:
snake_case__ : Any = temp.pop(A__ )
if tensors is None:
snake_case__ : Union[str, Any] = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
snake_case__ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=A__ )
            # Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case__ : Optional[int] = tensors[key] / pretraining_tp
snake_case__ : int = model.load_state_dict(A__ , strict=A__ )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
snake_case__ : List[Any] = set(other_keys.missing_keys )
else:
snake_case__ : Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(A__ , exist_ok=A__ )
snake_case__ : Any = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
snake_case__ : List[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
snake_case__ : str = model.to(config.torch_dtype )
torch.save(model.state_dict() , A__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
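# A toy illustration of the two tensor-parallel merge rules applied above
# (not called by the script; shapes and tensors are made up): norm/bias-like
# weights are averaged across shards, parallel linear weights are concatenated.
def _tp_merge_demo(pretraining_tp: int = 4):
    shards = [torch.randn(8, 16) for _ in range(pretraining_tp)]
    averaged = sum(shards) / pretraining_tp        # e.g. input_layernorm.weight
    row_parallel = torch.cat(shards, dim=1)        # e.g. self_attention.dense.weight -> (8, 64)
    column_parallel = torch.cat(shards, dim=0)     # everything else -> (32, 16)
    return averaged, row_parallel, column_parallel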
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowerCAmelCase__ : Any = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 143 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class A ( lowerCAmelCase_ ):
def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase, **__lowerCAmelCase )
requires_backends(self, '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase_ = {}
lowerCAmelCase_ = {}
if prompt is not None:
lowerCAmelCase_ = prompt
if generate_kwargs is not None:
lowerCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
lowerCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase, **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase_ = load_image(__lowerCAmelCase )
if prompt is not None:
if not isinstance(__lowerCAmelCase, __lowerCAmelCase ):
raise ValueError(
f"Received an invalid text input, got - {type(__lowerCAmelCase )} - but expected a single string. "
'''Note also that one single text can be provided for conditional image to text generation.''' )
lowerCAmelCase_ = self.model.config.model_type
if model_type == "git":
lowerCAmelCase_ = self.image_processor(images=__lowerCAmelCase, return_tensors=self.framework )
lowerCAmelCase_ = self.tokenizer(text=__lowerCAmelCase, add_special_tokens=__lowerCAmelCase ).input_ids
lowerCAmelCase_ = [self.tokenizer.cls_token_id] + input_ids
lowerCAmelCase_ = torch.tensor(__lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
lowerCAmelCase_ = self.image_processor(images=__lowerCAmelCase, header_text=__lowerCAmelCase, return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCAmelCase_ = self.image_processor(images=__lowerCAmelCase, return_tensors=self.framework )
lowerCAmelCase_ = self.tokenizer(__lowerCAmelCase, return_tensors=self.framework )
model_inputs.update(__lowerCAmelCase )
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation" )
else:
lowerCAmelCase_ = self.image_processor(images=__lowerCAmelCase, return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCAmelCase_ = None
return model_inputs
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''], __lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
lowerCAmelCase_ = None
if generate_kwargs is None:
lowerCAmelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCAmelCase_ = model_inputs.pop(self.model.main_input_name )
lowerCAmelCase_ = self.model.generate(__lowerCAmelCase, **__lowerCAmelCase, **__lowerCAmelCase )
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = []
for output_ids in model_outputs:
lowerCAmelCase_ = {
'''generated_text''': self.tokenizer.decode(
__lowerCAmelCase, skip_special_tokens=__lowerCAmelCase, )
}
records.append(__lowerCAmelCase )
return records
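# Editor's note: a hedged usage sketch for the image-to-text pipeline defined
# above. "image-to-text" is the standard `transformers.pipeline` task string;
# the image path below is illustrative, not part of this file.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text")            # loads a default checkpoint
#   outputs = captioner("path/or/url/to/image.png")  # [{"generated_text": ...}]
#   print(outputs[0]["generated_text"])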
| 358 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class A ( unittest.TestCase ):
def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=True, UpperCamelCase__=1 / 255, UpperCamelCase__=True, ):
"""simple docstring"""
lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_pad
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
if not batched:
lowerCAmelCase_ = image_inputs[0]
if isinstance(UpperCamelCase__, Image.Image ):
lowerCAmelCase_ , lowerCAmelCase_ = image.size
else:
lowerCAmelCase_ , lowerCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase_ = int(self.size['''shortest_edge'''] * h / w )
lowerCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
lowerCAmelCase_ = self.size['''shortest_edge''']
lowerCAmelCase_ = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCAmelCase_ = self.size['''shortest_edge''']
lowerCAmelCase_ = self.size['''shortest_edge''']
else:
lowerCAmelCase_ = []
for image in image_inputs:
lowerCAmelCase_ , lowerCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase_ = max(UpperCamelCase__, key=lambda UpperCamelCase__ : item[0] )[0]
lowerCAmelCase_ = max(UpperCamelCase__, key=lambda UpperCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_rescale''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_pad''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor()
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowerCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, masks_path=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify masks
lowerCAmelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), UpperCamelCase__ )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
| 167 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 44 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44 | 1 |
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 6008_5147_5143 ) -> int:
try:
lowerCamelCase__ : Optional[int] = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : int = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowerCamelCase__ : int = i
while n % i == 0:
lowerCamelCase__ : str = n // i
i += 1
return int(_UpperCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 45 |
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> float:
return 10 - x * x
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> float:
    # Bolzano's theorem: a sign change of the function on [a, b] guarantees a root
if equation(_UpperCAmelCase ) * equation(_UpperCAmelCase ) >= 0:
raise ValueError('Wrong space!' )
lowerCamelCase__ : Tuple = a
while (b - a) >= 0.01:
# Find middle point
lowerCamelCase__ : Optional[int] = (a + b) / 2
# Check if middle point is root
if equation(_UpperCAmelCase ) == 0.0:
break
        # Decide which side of the midpoint to keep for the next iteration
if equation(_UpperCAmelCase ) * equation(_UpperCAmelCase ) < 0:
lowerCamelCase__ : Tuple = c
else:
lowerCamelCase__ : Optional[int] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
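# Editor's sketch: the bisection routine above in textbook form (bracket
# [a, b], midpoint c, the function f passed explicitly); mapping the
# obfuscated names onto a/b/c is an assumption inferred from the control flow.
def bisection_sketch(f, a: float, b: float, tol: float = 0.01) -> float:
    if f(a) * f(b) >= 0:            # Bolzano: a sign change is required
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= tol:
        c = (a + b) / 2             # midpoint of the current bracket
        if f(c) == 0.0:
            break
        if f(a) * f(c) < 0:         # root lies in [a, c]
            b = c
        else:                       # root lies in [c, b]
            a = c
    return c


assert abs(bisection_sketch(lambda x: 10 - x * x, 0, 6) - 10 ** 0.5) < 0.01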
| 45 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = "arrow" , **lowerCAmelCase__ , ) -> List[Any]:
super().__init__(
split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = load_from_cache_file
SCREAMING_SNAKE_CASE = file_format
SCREAMING_SNAKE_CASE = Spark(
df=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , working_dir=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __A ( self ) -> Tuple:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
SCREAMING_SNAKE_CASE = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCAmelCase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
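# Editor's note: a hedged usage sketch. In recent `datasets` releases this
# reader backs `Dataset.from_spark`; treat the exact entry point and version
# availability as assumptions rather than guarantees of this file.
#
#   from datasets import Dataset
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)   # materializes the DataFrame as a Dataset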
| 113 |
def __snake_case ( _lowerCAmelCase : list ) -> list:
if len(_lowerCAmelCase ) <= 1:
return [tuple(_lowerCAmelCase )]
A_ : Tuple = []
def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ):
A_ : List[str] = [0] * n
res.append(tuple(_lowerCAmelCase ) )
A_ : int = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
A_ , A_ : str = arr[i], arr[0]
else:
A_ , A_ : List[str] = arr[i], arr[c[i]]
res.append(tuple(_lowerCAmelCase ) )
c[i] += 1
A_ : Tuple = 0
else:
A_ : Dict = 0
i += 1
generate(len(_lowerCAmelCase ) , _lowerCAmelCase )
return res
if __name__ == "__main__":
_lowerCAmelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCAmelCase : str = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
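# Editor's note: the function above is the iterative form of Heap's algorithm,
# which emits all n! permutations while performing exactly one swap per step.
# A quick check, assuming the de-obfuscated entry point is named `heaps`:
#
#   >>> sorted(heaps([1, 2, 3]))
#   [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]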
| 300 | 0 |
import math
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if (
not isinstance(_UpperCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if (
not isinstance(_UpperCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
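# Editor's note: the two routines above evaluate the power triangle,
# P = S * pf and Q = S * sqrt(1 - pf**2). A worked check with illustrative
# names (S = apparent power in VA, pf = power factor):
import math

S, pf = 100.0, 0.8
P = S * pf                        # real power: 80.0 W
Q = S * math.sqrt(1 - pf ** 2)    # reactive power: 60.0 var
assert math.isclose(P ** 2 + Q ** 2, S ** 2)  # P^2 + Q^2 = S^2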
| 127 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : int
_UpperCAmelCase : int
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: list[list[Edge]] = [[] for _ in range(lowerCAmelCase__)]
SCREAMING_SNAKE_CASE_: Dict = size
def __getitem__( self : Dict , lowerCAmelCase__ : int):
return iter(self._graph[vertex])
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return self._size
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1.")
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size).")
self._graph[from_vertex].append(Edge(lowerCAmelCase__ , lowerCAmelCase__))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Optional[int] = deque([start_vertex])
SCREAMING_SNAKE_CASE_: list[int | None] = [None] * self.size
SCREAMING_SNAKE_CASE_: List[Any] = 0
while queue:
SCREAMING_SNAKE_CASE_: int = queue.popleft()
SCREAMING_SNAKE_CASE_: str = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
SCREAMING_SNAKE_CASE_: Optional[int] = current_distance + edge.weight
SCREAMING_SNAKE_CASE_: str = distances[edge.destination_vertex]
if (
isinstance(lowerCAmelCase__ , lowerCAmelCase__)
and new_distance >= dest_vertex_distance
):
continue
SCREAMING_SNAKE_CASE_: Any = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex.")
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
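# Editor's note: the class above implements 0-1 BFS: with edge weights
# restricted to {0, 1}, a deque replaces Dijkstra's priority queue (0-weight
# edges are pushed to the front, 1-weight edges to the back), giving shortest
# paths in O(V + E). A hedged usage sketch, assuming the de-obfuscated class
# and method names are `AdjacencyList`, `add_edge`, and `get_shortest_path`:
#
#   graph = AdjacencyList(3)
#   graph.add_edge(0, 1, 0)          # free hop
#   graph.add_edge(1, 2, 1)          # unit-cost hop
#   assert graph.get_shortest_path(0, 2) == 1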
| 127 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : List[str]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=99 , __UpperCamelCase : str=32 , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : str=512 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : List[str]=None , ) -> int:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCamelCase ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(__UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> List[str]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict ) -> Tuple:
_UpperCamelCase = TFBlipTextModel(config=__UpperCamelCase )
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , training=__UpperCamelCase )
_UpperCamelCase = model(__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
snake_case__ = (TFBlipTextModel,) if is_tf_available() else ()
snake_case__ = False
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _UpperCamelCase ( self : List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ) -> str:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCamelCase ( self : Tuple ) -> Tuple:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def _UpperCamelCase ( self : List[Any] ) -> Any:
pass
@slow
def _UpperCamelCase ( self : str ) -> Optional[int]:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : List[str]=True ) -> Union[str, Any]:
super().test_pt_tf_model_equivalence(allow_missing_keys=__UpperCamelCase )
| 256 | """simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ ( _lowercase):
snake_case__ = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''Whether to SortishSamler or not.'''})
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''whether to use adafactor'''})
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''})
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''})
snake_case__ = field(default=_lowercase , metadata={'''help''': '''Dropout probability. Goes into model.config.'''})
snake_case__ = field(
default=_lowercase , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''})
snake_case__ = field(
default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'''} , )
| 256 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCamelCase = 25_0004
__UpperCamelCase = 25_0020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self) -> Dict:
snake_case_ = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
snake_case_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
def a_ ( self) -> Any:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self.tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__)
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
snake_case_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__)
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__)
# Save tokenizer rust, legacy_format=True
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__)
snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__)
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__)
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
shutil.rmtree(lowerCAmelCase__)
# Save tokenizer rust, legacy_format=False
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__)
snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__)
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
shutil.rmtree(lowerCAmelCase__)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "facebook/mbart-large-en-ro"
SCREAMING_SNAKE_CASE_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
SCREAMING_SNAKE_CASE_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
SCREAMING_SNAKE_CASE_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def a_ ( cls) -> Tuple:
snake_case_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
snake_case_ = 1
return cls
def a_ ( self) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 25_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 25_0004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 25_0020)
def a_ ( self) -> Any:
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)
def a_ ( self) -> Any:
self.assertIn(lowerCAmelCase__, self.tokenizer.all_special_ids)
snake_case_ = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
snake_case_ = self.tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
snake_case_ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0], lowerCAmelCase__)
snake_case_ = 10
snake_case_ = self.tokenizer(lowerCAmelCase__, max_length=lowerCAmelCase__, truncation=lowerCAmelCase__).input_ids[0]
self.assertEqual(ids[-2], 2)
self.assertEqual(ids[-1], lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [25_0026, 25_0001])
def a_ ( self) -> List[Any]:
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = MBartTokenizer.from_pretrained(lowerCAmelCase__)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCAmelCase__)
@require_torch
def a_ ( self) -> int:
snake_case_ = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, return_tensors='pt')
snake_case_ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self) -> str:
snake_case_ = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=len(self.expected_src_tokens), return_tensors='pt', )
snake_case_ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)
self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
def a_ ( self) -> Optional[Any]:
snake_case_ = self.tokenizer(self.src_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=3, return_tensors='pt')
snake_case_ = self.tokenizer(
text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=10, return_tensors='pt')
snake_case_ = targets['input_ids']
snake_case_ = shift_tokens_right(lowerCAmelCase__, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def a_ ( self) -> Any:
snake_case_ = self.tokenizer._build_translation_inputs(
'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
self.assertEqual(
nested_simplify(lowerCAmelCase__), {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 25_0004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
}, )
| 312 | """simple docstring"""
from math import pi
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
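# Editor's note: arc length is the circumference scaled by the swept angle,
# 2 * pi * r * (angle / 360). For the driver call above,
# arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7080.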
| 312 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
def count_of_possible_combinations(UpperCAmelCase_ : List[Any] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(A_ )
def __UpperCAmelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__snake_case : str = sum(
count_of_possible_combinations_with_dp_array(target - item , A_ )
for item in array )
__snake_case : Optional[Any] = answer
return answer
__snake_case : str = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(A_ , A_ )
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = [0] * (target + 1)
__snake_case : Optional[int] = 1
for i in range(1 , target + 1 ):
for j in range(A_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Tuple= 3
_a : str= 5
_a : Optional[int]= [1, 2, 5]
print(combination_sum_iv(n, array, target))
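# Editor's note: all three variants above count *ordered* compositions
# (1+2+2 and 2+1+2 are distinct), not subsets. Hand check for the driver call
# above (array = [1, 2, 5], target = 5) via the bottom-up recurrence
# dp[i] = sum(dp[i - a] for a in array if i >= a), dp[0] = 1:
#   dp = [1, 1, 2, 3, 5, 9]  ->  the printed answer is 9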
| 172 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__UpperCamelCase : int = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(A_ , '''config.json''' ) ):
os.remove(os.path.join(A_ , '''config.json''' ) )
if os.path.exists(os.path.join(A_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(A_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(A_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_=False ):
lowerCAmelCase__ : Optional[Any] = 2
if unlogit:
lowerCAmelCase__ : Union[str, Any] = torch.pow(A_ , A_ )
lowerCAmelCase__ : Optional[Any] = p * torch.log(A_ )
lowerCAmelCase__ : List[Any] = 0
return -plogp.sum(dim=-1 )
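# Editor's note: the function above is Shannon entropy over the last axis,
# H(p) = -sum_i p_i * log(p_i) with the p * log(p) term zeroed where p == 0;
# the `unlogit` branch squares p first. A minimal standalone check:
#
#   import torch
#   p = torch.tensor([0.5, 0.5])
#   assert torch.isclose(-(p * p.log()).sum(), torch.log(torch.tensor(2.0)))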
def __SCREAMING_SNAKE_CASE ( A_ ):
logger.info('''lv, h >\t''' + '''\t'''.join(f'{x + 1}' for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
lowerCAmelCase__ : Dict = torch.zeros(A_ , A_ ).to(args.device )
lowerCAmelCase__ : int = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
lowerCAmelCase__ : Union[str, Any] = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : Optional[int] = 0.0
lowerCAmelCase__ : Optional[int] = 0.0
for step, inputs in enumerate(tqdm(A_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
lowerCAmelCase__ : Any = tuple(t.to(args.device ) for t in inputs )
((lowerCAmelCase__) ,) : List[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowerCAmelCase__ : Any = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
lowerCAmelCase__ : Dict = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCAmelCase__ : Any = 2
lowerCAmelCase__ : Dict = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
lowerCAmelCase__ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(A_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(A_ )
logger.info('''Head ranked by importance scores''' )
lowerCAmelCase__ : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowerCAmelCase__ : Optional[int] = torch.arange(
head_importance.numel() , device=args.device )
lowerCAmelCase__ : int = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
    lowerCAmelCase__ : Union[str, Any] = 1 / loss # instead of downstream score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , A_ , original_score * args.masking_threshold )
lowerCAmelCase__ : Union[str, Any] = torch.ones_like(A_ )
lowerCAmelCase__ : List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowerCAmelCase__ : int = original_score
while current_score >= original_score * args.masking_threshold:
lowerCAmelCase__ : Union[str, Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowerCAmelCase__ : str = float('''Inf''' )
lowerCAmelCase__ : List[Any] = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowerCAmelCase__ : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
lowerCAmelCase__ : int = new_head_mask.view(-1 )
lowerCAmelCase__ : Optional[int] = 0.0
lowerCAmelCase__ : Union[str, Any] = new_head_mask.view_as(A_ )
lowerCAmelCase__ : Tuple = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
lowerCAmelCase__ : Tuple = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('''Final head mask''' )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Optional[Any] = datetime.now()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
lowerCAmelCase__ : Optional[Any] = 1 / loss
lowerCAmelCase__ : Tuple = datetime.now() - before_time
lowerCAmelCase__ : int = sum(p.numel() for p in model.parameters() )
lowerCAmelCase__ : List[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
lowerCAmelCase__ : int = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
lowerCAmelCase__ : List[Any] = sum(p.numel() for p in model.parameters() )
lowerCAmelCase__ : Any = datetime.now()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : int = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
lowerCAmelCase__ : int = 1 / loss
lowerCAmelCase__ : Dict = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , A_ , A_ , pruned_num_params / original_num_params * 1_00 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , A_ , A_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_00 )
save_model(A_ , args.output_dir )
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=A_ , type=A_ , required=A_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=A_ , type=A_ , required=A_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=A_ , type=A_ , required=A_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=A_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=A_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=A_ , type=A_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=A_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=A_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=A_ , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=A_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=A_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=A_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=A_ , default=42 )
parser.add_argument('''--local_rank''' , type=A_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=A_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=A_ , default='''''' , help='''Can be used for distant debugging.''' )
lowerCAmelCase__ : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCAmelCase__ : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowerCAmelCase__ : str = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCAmelCase__ : Dict = torch.device('''cuda''' , args.local_rank )
lowerCAmelCase__ : Union[str, Any] = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCAmelCase__ : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCAmelCase__ : Dict = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
lowerCAmelCase__ : List[Any] = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , A_ )
# Prepare dataset
lowerCAmelCase__ : str = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
lowerCAmelCase__ : Union[str, Any] = (torch.from_numpy(A_ ),)
lowerCAmelCase__ : Tuple = TensorDataset(*A_ )
lowerCAmelCase__ : Optional[int] = RandomSampler(A_ )
lowerCAmelCase__ : Dict = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCAmelCase__ : Tuple = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 106 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = 0.0_1
with locka.acquire():
with pytest.raises(__SCREAMING_SNAKE_CASE ):
__lowercase = time.time()
locka.acquire(__SCREAMING_SNAKE_CASE )
assert time.time() - _start > timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = "a" * 1000 + ".lock"
__lowercase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(__SCREAMING_SNAKE_CASE )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__lowercase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__SCREAMING_SNAKE_CASE ):
locka.acquire(0 )
| 369 |
'''simple docstring'''
from math import sqrt
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' must been an int and positive"
__lowercase = True
    # 0 and 1 are not prime.
if number <= 1:
__lowercase = False
for divisor in range(2 , int(round(sqrt(A__ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor', set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
__lowercase = False
break
# precondition
assert isinstance(A__ , A__ ), "'status' must been from type bool"
return status
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowercase = list(range(2 , n + 1 ) )
    __lowercase = [] # this list will be returned.
    # the actual sieve of Eratosthenes
for i in range(len(A__ ) ):
for j in range(i + 1 , len(A__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowercase = 0
# filters actual prime numbers.
__lowercase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type list"
return ans
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ) and (n > 2), "'N' must been an int and > 2"
__lowercase = []
    # iterate over all numbers from 2 up to N
    # and append each prime to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(A__ ):
ans.append(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type list"
return ans
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ) and number >= 0, "'number' must been an int and >= 0"
    __lowercase = [] # this list will be returned by the function.
# potential prime number factors.
__lowercase = 2
__lowercase = number
if number == 0 or number == 1:
ans.append(A__ )
    # if 'number' is not prime, build the prime factorization of 'number'
elif not is_prime(A__ ):
while quotient != 1:
if is_prime(A__ ) and (quotient % factor == 0):
ans.append(A__ )
quotient /= factor
else:
factor += 1
else:
ans.append(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type list"
return ans
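# Editor's note: a worked example for the factorization above:
# prime_factorization(360) should yield [2, 2, 2, 3, 3, 5], since
# 360 = 2**3 * 3**2 * 5.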
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(A__ )
__lowercase = max(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type int"
return ans
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(A__ )
__lowercase = min(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type int"
return ans
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , A__ ), "compare must been from type bool"
return number % 2 == 0
def _A ( A__ ):
"""simple docstring"""
assert isinstance(A__ , A__ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , A__ ), "compare must been from type bool"
return number % 2 != 0
def goldbach(number):
    """simple docstring"""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
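# illustrative check (added): goldbach returns the first prime pair found by the
# double loop above, so with this search order 28 == 5 + 23.
assert goldbach(28) == [5, 23]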
def gcd(number_a, number_b):
    """simple docstring"""
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a, int) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def kg_v(number_a, number_b):
    """simple docstring"""
    assert (
        isinstance(number_a, int)
        and isinstance(number_b, int)
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number_a > 1 and number_b > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number_a)
        prime_fac_b = prime_factorization(number_b)
    elif number_a == 1 or number_b == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number_a, number_b)
    count_a = 0
    count_b = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
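# cross-check (added): for positive integers the product of gcd and lcm equals
# the product of the inputs, e.g. gcd(24, 36) == 12 and kg_v(24, 36) == 72.
assert gcd(24, 36) * kg_v(24, 36) == 24 * 36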
def get_prime(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_a, p_number_b):
    """simple docstring"""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisors(...)"
    return ans


def is_perfect_number(number):
    """simple docstring"""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """simple docstring"""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
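# small usage check (added): with this indexing fib yields 1, 1, 2, 3, 5, 8, ...
# for n = 0..5, so fib(10) == 89.
assert [fib(n) for n in range(6)] == [1, 1, 2, 3, 5, 8]
assert fib(10) == 89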
| 52 | 0 |
"""simple docstring"""
from math import factorial
class Dual:
    """simple docstring"""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
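    # worked example (added): f(y) = y**2 * y**4 = y**6, whose second derivative
    # is 30 * y**4, so the print above outputs 30 * 9**4 == 196830.
    assert differentiate(lambda y: y**6, 9, 2) == 30 * 9**4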
| 78 | '''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
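# sanity check (added, not part of the test suite below): the 6k +/- 1 shortcut
# agrees with naive trial division on a small range.
def _naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d != 0 for d in range(2, n))


assert all(is_prime(n) == _naive_is_prime(n) for n in range(2, 500))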
class __magic_name__ ( unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 239 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty(self, results):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = "sgugger/tiny-distilbert-classification"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "patrickvonplaten/t5-tiny-random"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , configs=[config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv" ) , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv" ) ).exists() )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : Any ):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "current" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt" ) ).exists() )
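# --- added usage sketch -----------------------------------------------------
# Outside the test harness the same API is driven like this; the argument
# values below are illustrative — only the call pattern mirrors the tests above.
#
#     args = TensorFlowBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"],
#         inference=True,
#         sequence_lengths=[8],
#         batch_sizes=[1],
#         multi_process=False,
#     )
#     results = TensorFlowBenchmark(args).run()
#     print(results.time_inference_result)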
| 174 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    '''simple docstring'''
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
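# Example invocation (added; `fire.Fire` exposes the function as a CLI, mapping
# positional arguments to pred_path/tgt_path — the file names are illustrative):
#
#     python calculate_rouge.py predictions.txt references.txt --save_path metrics.json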
| 174 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
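# How the pattern above works (added note): at import time only the cheap
# `_import_structure` dict is built, and the module object in `sys.modules` is
# swapped for a `_LazyModule` that imports a submodule the first time one of
# its attributes is touched. A stripped-down sketch of the idea (the real
# implementation lives in transformers.utils):
#
#     class _LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._name_to_module[name]  # e.g. "modeling_mt5"
#             value = getattr(importlib.import_module("." + submodule, self.__name__), name)
#             setattr(self, name, value)  # cache so __getattr__ only fires once
#             return value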
| 80 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """simple docstring"""

    def __init__(self, directed: bool = True) -> None:
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """simple docstring"""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self : Dict ) ->str:
"""simple docstring"""
return pformat(self.adj_list )
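if __name__ == "__main__":
    # small usage example (added): build an undirected triangle and print it.
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(0, 1)
    graph.add_edge(1, 2)
    graph.add_edge(2, 0)
    print(graph)  # {0: [1, 2], 1: [0, 2], 2: [1, 0]}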
| 245 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    '''simple docstring'''

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
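# Note (added): the perplexity reported above is the exponential of the mean
# token-level cross-entropy loss, i.e.
#
#     perplexity = torch.exp(torch.mean(torch.cat(losses)))
#
# so a perplexity of e.g. 20 means the model is, on average, as uncertain as a
# uniform choice over 20 tokens.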
| 73 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 302 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 302 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 180 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 180 | 1 |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
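# quick usage example (added): declaring a dataset schema with these types.
#
#     from datasets import ClassLabel, Features, Value
#
#     features = Features(
#         {"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])}
#     )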
| 204 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
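# --- added usage sketch -----------------------------------------------------
# The slow test above corresponds to this interactive usage (the exact reply
# string depends on the checkpoint version):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
#     inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
#     reply_ids = model.generate(inputs.input_ids)
#     print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])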
| 204 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        """simple docstring"""
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = self.tokenizer_class(self.vocab_file )
a__ : Union[str, Any] = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(__lowercase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(__lowercase )
a__ : Dict = """こんにちは、世界。\nこんばんは、世界。"""
a__ : List[str] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
a__ : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__lowercase , """wb""" ) as handle:
pickle.dump(__lowercase , __lowercase )
with open(__lowercase , """rb""" ) as handle:
a__ : Optional[Any] = pickle.load(__lowercase )
a__ : Union[str, Any] = tokenizer_new.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : Dict = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
try:
a__ : List[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
try:
a__ : Tuple = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : List[Any] = MecabTokenizer(do_lower_case=__lowercase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
try:
a__ : Union[str, Any] = MecabTokenizer(
do_lower_case=__lowercase , normalize_text=__lowercase , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : List[Any] = MecabTokenizer(normalize_text=__lowercase , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(__lowercase )
a__ : Tuple = """こんにちは、世界。\nこんばんは、世界。"""
a__ : Tuple = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
a__ : Optional[int] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__lowercase , """wb""" ) as handle:
pickle.dump(__lowercase , __lowercase )
with open(__lowercase , """rb""" ) as handle:
a__ : Dict = pickle.load(__lowercase )
a__ : str = tokenizer_new.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Any = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : str = SudachiTokenizer(do_lower_case=__lowercase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Optional[int] = SudachiTokenizer(normalize_text=__lowercase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Tuple = SudachiTokenizer(trim_whitespace=__lowercase , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(__lowercase )
a__ : Dict = """こんにちは、世界。\nこんばんは、世界。"""
a__ : Optional[int] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
a__ : str = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__lowercase , """wb""" ) as handle:
pickle.dump(__lowercase , __lowercase )
with open(__lowercase , """rb""" ) as handle:
a__ : Optional[int] = pickle.load(__lowercase )
a__ : Optional[int] = tokenizer_new.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@require_jumanpp
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : List[Any] = JumanppTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = JumanppTokenizer(normalize_text=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : List[str] = JumanppTokenizer(trim_whitespace=__lowercase )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
a__ : str = {}
for i, token in enumerate(__lowercase ):
a__ : Optional[Any] = i
a__ : List[Any] = WordpieceTokenizer(vocab=__lowercase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : List[Any] = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
a__ : Any = tokenizer.subword_tokenizer
a__ : Optional[Any] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(__lowercase , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
a__ : Union[str, Any] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(__lowercase , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Any = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
a__ : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=__lowercase )
a__ : Dict = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__lowercase )
a__ : Tuple = tokenizer.build_inputs_with_special_tokens(__lowercase )
a__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
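        # i.e. a single sequence is encoded as "[CLS] tokens [SEP]" and a pair as
        # "[CLS] tokens_a [SEP] tokens_b [SEP]", the standard BERT layout.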
@custom_tokenizers
class snake_case__ (A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :List[Any] = BertJapaneseTokenizer
__lowerCAmelCase :List[Any] = False
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
a__ : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Tuple:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Any:
"""simple docstring"""
a__ : str = """こんにちは、世界。 \nこんばんは、世界。"""
a__ : Optional[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
a__ : Dict = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
__lowercase , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
a__ : List[str] = {}
for i, token in enumerate(__lowercase ):
a__ : Optional[Any] = i
a__ : Dict = CharacterTokenizer(vocab=__lowercase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : int = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
a__ : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=__lowercase )
a__ : List[str] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__lowercase )
a__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowercase )
a__ : List[str] = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : Tuple = """cl-tohoku/bert-base-japanese"""
a__ : int = AutoTokenizer.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Any = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(__lowercase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
a__ : int = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(__lowercase )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 362 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION ="\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION ="\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION ="\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 266 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Dict = logging.get_logger(__name__)
A_ :Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __A ( PretrainedConfig ):
    """simple docstring"""
    model_type ="""megatron-bert"""
    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
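# A minimal usage sketch (hypothetical values; assumes this module is importable as part
# of the transformers package so the relative imports resolve; `__A` is the config class above):
#   config = __A(num_hidden_layers=2, hidden_size=128)
#   print(config.model_type, config.hidden_size)  # megatron-bert 128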
| 71 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """simple docstring"""
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.')
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network
        state_dict = torch.load(pytorch_checkpoint_path, map_location="""cpu""")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'
    # Save the TF 2.0 model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format="""h5""")
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    """simple docstring"""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print("""=""" * 1_00)
        print(f'    Converting model type {j}/{len(model_types )}: {model_type}')
        print("""=""" * 1_00)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.')
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print("""-""" * 1_00)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f'    Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f'    Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                f'    Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}')
            print("""-""" * 1_00)
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = """converted_model"""
            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
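# Example invocation (illustrative paths; the flags are the ones defined above, and the
# script is assumed to be saved as convert_pytorch_checkpoint_to_tf2.py):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path ./bert-pytorch_model.bin \
#       --config_file ./bert_config.json \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model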
| 267 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : Dict = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _lowerCAmelCase( PretrainedConfig ):
    """simple docstring"""
    model_type ='''xglm'''
    keys_to_ignore_at_inference =['''past_key_values''']
    attribute_map ={
        '''num_attention_heads''': '''attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , vocab_size=256008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 292 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
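# A graph with an odd cycle (a triangle) cannot be two-colored, so the check fails:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False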
print(check_bipartite_dfs(graph))
| 292 | 1 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast ( BertTokenizerFast ):
    slow_tokenizer_class = CustomTokenizer
pass
| 33 |
"""simple docstring"""
def fibonacci ( n : int ):
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n : int ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index) ) )
    return index
def solution ( n : int = 1_0_0_0 ):
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
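    # Quick check: the first Fibonacci number with 3 digits is F(12) = 144.
    print(fibonacci_digits_index(3))  # 12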
| 33 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCAmelCase__ ( nn.Module ):
    def __init__( self , num_attention_heads : int = 1_6 , attention_head_dim : int = 8_8 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 3_2 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [7_7, 2_5_7]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict : bool = True , ):
        '''simple docstring'''
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
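# A standalone sketch of the residual mixing done at the end of forward() (illustrative
# shapes; runnable with torch alone, independent of the transformer blocks):
#   import torch
#   input_states = torch.randn(1, 64, 16)
#   encoded = [torch.randn(1, 64, 16), torch.randn(1, 64, 16)]  # per-condition outputs minus input
#   mix_ratio = 0.5
#   output = encoded[0] * mix_ratio + encoded[1] * (1 - mix_ratio) + input_states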
| 363 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( k : int , n : int ):
    '''simple docstring'''
    # factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
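    # Worked example: the k=2 permutation (0-indexed) of [0, 1, 2] is [1, 0, 2].
    print(SCREAMING_SNAKE_CASE__(2, 3))  # [1, 0, 2]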
| 298 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( model , model_file ):
    try:
        with open(model_file ,'''rb''' ) as flax_state_f:
            flax_state = from_bytes(model ,flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.''' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model ,flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 ,flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params ,flax_state )
    pt_model.base_model_prefix = ''''''
    flax_state_dict = flatten_dict(flax_state ,sep='''.''' )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor ,(3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('''_0''' ,'''.0''' )
                    .replace('''_1''' ,'''.1''' )
                    .replace('''_2''' ,'''.2''' )
                    .replace('''_3''' ,'''.3''' )
                    .replace('''_4''' ,'''.4''' )
                    .replace('''_5''' ,'''.5''' )
                    .replace('''_6''' ,'''.6''' )
                    .replace('''_7''' ,'''.7''' )
                    .replace('''_8''' ,'''.8''' )
                    .replace('''_9''' ,'''.9''' )
                )
        flax_key = '''.'''.join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor ,np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    if len(missing_keys ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            ''' use it for predictions and inference.''' )
    return pt_model
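# A standalone illustration of the 4-D "kernel" rule above (numpy only, illustrative shape):
# a Flax conv kernel stored as (H, W, C_in, C_out) maps to PyTorch's (C_out, C_in, H, W).
#   import numpy as np
#   kernel = np.zeros((3, 3, 8, 16))
#   print(np.transpose(kernel, (3, 2, 0, 1)).shape)  # (16, 8, 3, 3)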
| 103 |
import warnings
from functools import wraps
from typing import Callable
def lowerCAmelCase__ ( fn: Callable ) -> Callable:
    '''simple docstring'''
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , UserWarning , )
        return fn(*args , **kwargs )
return _inner_fn
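# Usage sketch: any call through the wrapped function emits the warning first.
#   @lowerCAmelCase__
#   def new_feature(x: int) -> int:
#       return x * 2
#   new_feature(3)  # UserWarning: 'new_feature' is experimental ...; returns 6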
| 68 | 0 |
from __future__ import annotations
def extended_euclid ( a : int , b : int ) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x , y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem ( n1 : int , r1 : int , n2 : int , r2 : int ) -> int:
    """simple docstring"""
    (x , y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo ( a : int , n : int ) -> int:
    """simple docstring"""
    (b , x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2 ( n1 : int , r1 : int , n2 : int , r2 : int ) -> int:
    """simple docstring"""
    x , y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 351 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 348 | 0 |
def solution ( n = 400_0000 ) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'''{solution() = }''')
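    # Quick check: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34, so the sum is 44.
    print(solution(100))  # 44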
| 143 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=3_0 , __UpperCamelCase=4_0_0 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , __UpperCamelCase=1 / 2_5_5 , __UpperCamelCase=True , ):
"""simple docstring"""
UpperCamelCase_ = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_pad
def lowerCamelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=False ):
"""simple docstring"""
if not batched:
UpperCamelCase_ = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
UpperCamelCase_ , UpperCamelCase_ = image.size
else:
UpperCamelCase_ , UpperCamelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase_ = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase_ = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase_ = self.size["""shortest_edge"""]
UpperCamelCase_ = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase_ = self.size["""shortest_edge"""]
UpperCamelCase_ = self.size["""shortest_edge"""]
else:
UpperCamelCase_ = []
for image in image_inputs:
UpperCamelCase_ , UpperCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_ = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
UpperCamelCase_ = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
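    # A quick worked example of the shortest-edge arithmetic above (illustrative numbers):
    # a 30x400 image with shortest_edge=18 keeps its aspect ratio, so the short side
    # becomes 18 and the long side becomes int(18 * 400 / 30) = 240.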
@require_torch
@require_vision
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : str = YolosImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = YolosImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
UpperCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
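# Editorial note: the two slow tests above exercise the two COCO annotation layouts the
# processor understands -- "coco_detection" ({"image_id", "annotations"}) and
# "coco_panoptic" ({"file_name", "image_id", "segments_info"} plus a masks_path directory).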
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: str, tgt_lang: str, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
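# Editorial usage sketch (hedged; the sentence and language pair are illustrative,
# only the checkpoint name comes from the map above):
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # input_ids = [en_XX language code] + sentencepiece ids + [</s>],
#   # as arranged by set_src_lang_special_tokens above.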
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
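# Editorial note: at import time only the names registered in _import_structure exist;
# e.g. `from transformers.models.nllb import NllbTokenizer` triggers the real module
# load through _LazyModule on first attribute access.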
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Params: fill in the input/output directories before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get image and annotation lists from the input dirs, flip them,
    and save the new images and annotations in the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Return the list of image paths and the list of per-image bounding boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its box centers along the chosen axis."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
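# Editorial note: get_dataset above assumes YOLO-format label files, one object per
# line: "<class_id> <x_center> <y_center> <width> <height>" with coordinates
# normalized to [0, 1]; flipping therefore only needs x_center -> 1 - x_center
# (horizontal) or y_center -> 1 - y_center (vertical).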
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
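# Editorial note: the equality test above holds because DDPMScheduler and DDIMScheduler
# share the same forward-noising rule,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical seeds, batches and timesteps both training passes see the same
# inputs and hence produce the same noisy images and noise predictions.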
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
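# Editorial usage sketch (hedged; the checkpoint name is illustrative):
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   # the auto class resolves BertConfig -> FlaxBertModel through FLAX_MODEL_MAPPING above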
def check_cycle(graph: dict) -> bool:
    """Check whether the given directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur for the neighbours; a neighbour already in the recursion stack is a back edge."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
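    # Editorial example (data assumed for illustration): the edge 3 -> 0 closes
    # the cycle 0 -> 1 -> 2 -> 3 -> 0, so check_cycle returns True.
    example_graph = {0: [1], 1: [2], 2: [3], 3: [0]}
    print(check_cycle(example_graph))  # True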
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page-replacement style LRU cache built on a deque plus a set of key references."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store; the cache capacity is set to n (0 means unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x, evicting the least recently used key when the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Reconcile an explicit `max_length` argument with the criteria list, appending one if missing."""
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
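# Editorial usage sketch (hedged; the 20-token budget and 5-second cap are
# illustrative values, not from the original module):
#
#   criteria = StoppingCriteriaList([
#       MaxLengthCriteria(max_length=20),
#       MaxTimeCriteria(max_time=5.0),
#   ])
#   # generation stops as soon as any criterion returns True for (input_ids, scores)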
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
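# Editorial usage sketch (hedged): PipelineTool instances are callable, chaining
# encode -> forward -> decode; the waveform below is a placeholder, not part of
# the original module.
#
#   tool = SpeechToTextTool()
#   transcript = tool(audio)  # `audio` is a raw waveform, e.g. a 1-D numpy array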
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
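    # Editorial example (values assumed for illustration): 0.82 mol of an ideal
    # gas in a 3 L vessel at 300 K exerts roughly nRT / V ≈ 7 atm.
    print(moles_to_pressure(volume=3, moles=0.82, temperature=300))  # 7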
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
if len(lowerCamelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__lowerCamelCase : List[str] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
if len(lowerCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__lowerCamelCase : str = len(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = matrix_length // 2
__lowerCamelCase : List[Any] = [[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : Union[str, Any] = [
[a[i][j] for j in range(lowerCamelCase__ , lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )
]
__lowerCamelCase : Optional[int] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ )]
__lowerCamelCase : List[Any] = [[a[i][j] for j in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ , lowerCamelCase__ )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
return len(lowerCamelCase__ ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
print('\n'.join(str(lowerCamelCase__ ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
if matrix_dimensions(lowerCamelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = split_matrix(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = split_matrix(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Tuple = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Any = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : List[str] = actual_strassen(lowerCamelCase__ , matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = actual_strassen(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Optional[Any] = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : Dict = actual_strassen(matrix_subtraction(lowerCamelCase__ , lowerCamelCase__ ) , matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase : int = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : Optional[Any] = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Dict = matrix_addition(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : int = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) , lowerCamelCase__ )
# construct the new matrix from our 4 quadrants
__lowerCamelCase : Dict = []
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
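
    # A minimal sanity check, not part of the original module: Strassen must
    # agree with a naive triple-loop product.  Fresh copies are used because
    # `strassen` pads its input matrices in place.
    import copy

    def naive_multiply(a: list, b: list) -> list:
        return [
            [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
            for i in range(len(a))
        ]

    sample_a = [[2, 1, 3], [3, 4, 6], [1, 4, 2], [7, 6, 7]]
    sample_b = [[4, 2, 3, 4], [2, 1, 1, 1], [8, 6, 4, 2]]
    assert strassen(copy.deepcopy(sample_a), copy.deepcopy(sample_b)) == naive_multiply(sample_a, sample_b)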
| 359 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
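

# A minimal, self-contained sketch of a textbook ElGamal round-trip with toy
# numbers (illustration only; the generator above stores a modular-inverse
# variant, so its key layout differs from this classic y = g^x form).
def _demo_elgamal_roundtrip() -> None:
    p, g = 467, 2  # toy prime and base -- never use sizes like this in practice
    x = 127  # private exponent
    y = pow(g, x, p)  # public component
    k, m = 213, 123  # ephemeral key and message
    c1, c2 = pow(g, k, p), (m * pow(y, k, p)) % p
    recovered = (c2 * pow(c1, p - 1 - x, p)) % p  # c1^(-x) via Fermat's little theorem
    assert recovered == m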
| 113 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
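
# What these placeholders buy (sketch, not part of the original file): the
# import always succeeds even without flax/transformers installed, but any
# use fails loudly, e.g.
#   FlaxStableDiffusionPipeline.from_pretrained("some/repo")
# raises an ImportError telling the user to install `flax` and `transformers`.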
| 167 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
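
# Hypothetical invocation sketch (all paths below are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl \
#       --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin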
| 167 | 1 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
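

# Quick self-checks; each title is read as a base-26 number with A=1 ... Z=26:
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 702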
if __name__ == "__main__":
from doctest import testmod
testmod()
| 364 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 220 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 154 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
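

# Hypothetical usage sketch (artifact names, directory, and token are placeholders):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["prev_ci_results"],
#       output_dir="previous_ci",
#       token=os.environ.get("ACCESS_REPO_INFO_TOKEN"),
#   )
#   # `reports` maps each artifact name to {filename: decoded file content}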
| 154 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
snake_case__ : Optional[Any] = pytest.mark.integration
snake_case__ : Union[str, Any] = {'comet'}
snake_case__ : Tuple = importlib.util.find_spec('fairseq') is not None
snake_case__ : List[str] = {'code_eval'}
snake_case__ : Optional[int] = os.name == 'nt'
snake_case__ : Optional[int] = {'bertscore', 'frugalscore', 'perplexity'}
snake_case__ : Optional[Any] = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"  # opaque placeholder kept verbatim from the source dump
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"  # opaque placeholder kept verbatim from the source dump
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which otherwise download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 250 |
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
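

# Spot checks against the first Proth numbers 3, 5, 9, 13, 17, 25, ...
assert proth(1) == 3
assert proth(4) == 13
assert proth(6) == 25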
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
snake_case__ : Optional[Any] = 0
try:
snake_case__ : int = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 250 | 1 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
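

# Known examples of the classic "abbreviation" problem: capitalize any subset
# of lowercase letters of `a`, delete the remaining lowercase letters, and
# check whether `b` can be produced.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False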
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
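

# Hedged usage sketch: `attribute_map` above routes `hidden_size` onto
# `d_model`, so both spellings address the same value:
#
#   config = InformerConfig(prediction_length=24)
#   assert config.hidden_size == config.d_model == 64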
| 224 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
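
# Hedged usage sketch for the class above; defaults should round-trip like any
# PretrainedConfig:
#
#   config = EfficientFormerConfig()
#   assert config.hidden_sizes == [48, 96, 224, 448]
#   assert config.num_meta3d_blocks == 1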
| 371 | import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
UpperCAmelCase__ , UpperCAmelCase__ = input_paths_and_base_extractors[compression_format]
if input_path is None:
UpperCAmelCase__ = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
assert base_extractor.is_extractable(__A )
UpperCAmelCase__ = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(__A, __A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ = file_path.read_text(encoding="utf-8" )
else:
UpperCAmelCase__ = output_path.read_text(encoding="utf-8" )
UpperCAmelCase__ = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
UpperCAmelCase__ = input_paths[compression_format]
if input_path is None:
UpperCAmelCase__ = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
UpperCAmelCase__ = Extractor.infer_extractor_format(__A )
assert extractor_format is not None
UpperCAmelCase__ = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(__A, __A, __A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ = file_path.read_text(encoding="utf-8" )
else:
UpperCAmelCase__ = output_path.read_text(encoding="utf-8" )
UpperCAmelCase__ = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCAmelCase_ ( __A, __A ) -> List[str]:
'''simple docstring'''
import tarfile
UpperCAmelCase__ = tmp_path / "data_dot_dot"
directory.mkdir()
UpperCAmelCase__ = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(__A, "w" ) as f:
f.add(__A, arcname=os.path.join("..", text_file.name ) )
return path
@pytest.fixture
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
import tarfile
UpperCAmelCase__ = tmp_path / "data_sym_link"
directory.mkdir()
UpperCAmelCase__ = directory / "tar_file_with_sym_link.tar"
os.symlink("..", directory / "subdir", target_is_directory=__A )
with tarfile.TarFile(__A, "w" ) as f:
f.add(str(directory / "subdir" ), arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
UpperCAmelCase__ = insecure_tar_files[insecure_tar_file]
UpperCAmelCase__ = tmp_path / "extracted"
TarExtractor.extract(__A, __A )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
UpperCAmelCase__ = (
B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(__A )
assert zipfile.is_zipfile(str(__A ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__A ) # but we're right
| 143 | 0 |
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
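

# Behaviour notes: the trailing separator is stripped before returning, so
assert join(" ", ["a", "b", "c"]) == "a b c"
assert join(",", ["You", "won't", "be", "alone"]) == "You,won't,be,alone"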
if __name__ == "__main__":
from doctest import testmod
testmod()
| 157 | import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
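

# A minimal stand-in showing the interface `simulated_annealing` relies on:
# a state with `.x`/`.y` coordinates, a `score()` objective, and
# `get_neighbors()`.  The real `SearchProblem` lives in `hill_climbing.py`;
# this stub is only an illustration.
class _ToyProblem:
    def __init__(self, x: float, y: float) -> None:
        self.x, self.y = x, y

    def score(self) -> float:
        return -(self.x**2 + self.y**2)  # maximizing this minimizes x^2 + y^2

    def get_neighbors(self) -> list:
        return [
            _ToyProblem(self.x + dx, self.y + dy)
            for dx in (-1, 0, 1)
            for dy in (-1, 0, 1)
            if (dx, dy) != (0, 0)
        ]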
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
| 157 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase__ : Any = get_logger(__name__)
class MockDownloadManager:
    dummy_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name: str,
        cache_dir: str,
        version: Union[Version, str],
        config: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
@property
def lowerCAmelCase_ ( self : str ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def lowerCAmelCase_ ( self : List[str] ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
SCREAMING_SNAKE_CASE_ = cached_path(
_lowerCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_lowerCAmelCase , force_extract=_lowerCAmelCase )
return os.path.join(_lowerCAmelCase , self.dummy_file_name )
@property
def lowerCAmelCase_ ( self : str ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self._bucket_url is None:
SCREAMING_SNAKE_CASE_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : List[str] , *_lowerCAmelCase : int ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
SCREAMING_SNAKE_CASE_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
SCREAMING_SNAKE_CASE_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return self.create_dummy_data_dict(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_lowerCAmelCase , _lowerCAmelCase )
else:
return self.create_dummy_data_single(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : List[str] , *_lowerCAmelCase : Optional[int] ):
return self.download_and_extract(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
return self.download_and_extract(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : int , *_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[Any] ):
return path
def lowerCAmelCase_ ( self : Dict ):
return {}
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for single_url in single_urls:
download_callback(_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = single_urls
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [os.path.join(_lowerCAmelCase , urllib.parse.quote_plus(Path(_lowerCAmelCase ).name ) ) for x in single_urls]
else:
SCREAMING_SNAKE_CASE_ = single_urls
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , urllib.parse.quote_plus(Path(_lowerCAmelCase ).name ) )
SCREAMING_SNAKE_CASE_ = value
# make sure that values are unique
if all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
SCREAMING_SNAKE_CASE_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
SCREAMING_SNAKE_CASE_ = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _lowerCAmelCase ) ) for url in data_url )
SCREAMING_SNAKE_CASE_ = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
SCREAMING_SNAKE_CASE_ = [data_url[0]] * len(_lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_lowerCAmelCase )
return dummy_data_list
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(_lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Any ):
def _iter_archive_members(_lowerCAmelCase : Tuple ):
# this preserves the order of the members inside the ZIP archive
SCREAMING_SNAKE_CASE_ = Path(self.dummy_file ).parent
SCREAMING_SNAKE_CASE_ = path.relative_to(_lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
SCREAMING_SNAKE_CASE_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = _iter_archive_members(_lowerCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_lowerCAmelCase ).as_posix(), file_path.open('rb' )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[Any] ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [paths]
for path in paths:
if os.path.isfile(_lowerCAmelCase ):
if os.path.basename(_lowerCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_lowerCAmelCase ):
if os.path.basename(_lowerCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_lowerCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_lowerCAmelCase , _lowerCAmelCase ) | 210 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 210 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 120 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    '''simple docstring'''
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
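# Minimal sketch (illustrative only, not executed by this script): how the two
# tensor-parallel merge rules above behave on toy tensors.
#   rank0, rank1 = torch.ones(2, 4), torch.zeros(2, 4)
#   torch.cat([rank0, rank1], dim=0).shape  # torch.Size([4, 4]) -- column parallel
#   torch.cat([rank0, rank1], dim=1).shape  # torch.Size([2, 8]) -- row parallel
#   (rank0 + rank1) / 2                     # elementwise average, used for layernorms/biases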
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 120 | 1 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(UpperCAmelCase__ ) * abs(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 101 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
lowercase = 42
lowercase = 42
class FlaxControlNetConditioningEmbedding(nn.Module):
lowercase = 42
lowercase = (16, 32, 96, 256)
lowercase = jnp.floataa
    def setup( self ):
'''simple docstring'''
A_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A_ = []
for i in range(len(self.block_out_channels ) - 1 ):
A_ = self.block_out_channels[i]
A_ = self.block_out_channels[i + 1]
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
A_ = blocks
A_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.conv_in(UpperCamelCase__ )
A_ = nn.silu(UpperCamelCase__ )
for block in self.blocks:
A_ = block(UpperCamelCase__ )
A_ = nn.silu(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return embedding
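    # Shape sketch (illustrative, assuming the default four block_out_channels):
    # the conditioning image enters at 8x the latent resolution, and the three
    # stride-2 convs above bring it down to the latent H x W before the
    # zero-initialized conv_out projects it to the embedding channels.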
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
lowercase = 32
lowercase = 4
lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase = False
lowercase = (320, 640, 1_280, 1_280)
lowercase = 2
lowercase = 8
lowercase = None
lowercase = 1_280
lowercase = 0.0
lowercase = False
lowercase = jnp.floataa
lowercase = True
lowercase = 0
lowercase = "rgb"
lowercase = (16, 32, 96, 256)
    def init_weights( self , UpperCamelCase__ ) -> FrozenDict:
'''simple docstring'''
# init input tensors
A_ = (1, self.in_channels, self.sample_size, self.sample_size)
A_ = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa )
A_ = jnp.ones((1,) , dtype=jnp.intaa )
A_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
A_ = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa )
A_ , A_ = jax.random.split(UpperCamelCase__ )
A_ = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )["params"]
    def setup( self ):
'''simple docstring'''
A_ = self.block_out_channels
A_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A_ = self.num_attention_heads or self.attention_head_dim
# input
A_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A_ = FlaxTimestepEmbedding(UpperCamelCase__ , dtype=self.dtype )
A_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
A_ = self.only_cross_attention
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = (num_attention_heads,) * len(self.down_block_types )
# down
A_ = []
A_ = []
A_ = block_out_channels[0]
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
A_ = FlaxDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCamelCase__ )
for _ in range(self.layers_per_block ):
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
if not is_final_block:
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
A_ = down_blocks
A_ = controlnet_down_blocks
# mid
A_ = block_out_channels[-1]
A_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ) -> Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
A_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
A_ = jnp.flip(UpperCamelCase__ , axis=1 )
# 1. time
if not isinstance(UpperCamelCase__ , jnp.ndarray ):
A_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
A_ = timesteps.astype(dtype=jnp.floataa )
A_ = jnp.expand_dims(UpperCamelCase__ , 0 )
A_ = self.time_proj(UpperCamelCase__ )
A_ = self.time_embedding(UpperCamelCase__ )
# 2. pre-process
A_ = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
A_ = self.conv_in(UpperCamelCase__ )
A_ = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
A_ = self.controlnet_cond_embedding(UpperCamelCase__ )
sample += controlnet_cond
# 3. down
A_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ , A_ = down_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
else:
A_ , A_ = down_block(UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
        # 5. controlnet blocks
A_ = ()
for down_block_res_sample, controlnet_block in zip(UpperCamelCase__ , self.controlnet_down_blocks ):
A_ = controlnet_block(UpperCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
A_ = controlnet_down_block_res_samples
A_ = self.controlnet_mid_block(UpperCamelCase__ )
# 6. scaling
A_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCamelCase__ , mid_block_res_sample=UpperCamelCase__ )
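# Note on step 6 above: conditioning_scale rescales every residual uniformly, so
# conditioning_scale=0.0 removes the ControlNet contribution entirely.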
| 101 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
 | 145 | 
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
"""simple docstring"""
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Any = (3_2, 3_2)
_UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
    def dummy_cond_unet_upscale( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
    def dummy_vae( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(lowerCAmelCase__ )
    def test_stable_diffusion_upscale( self ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Union[str, Any] = DDPMScheduler()
_UpperCAmelCase : str = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : int = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : str = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
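        # i.e. the dummy 64x64 low-res input must come back as a 256x256 image (x4 upscaler)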
_UpperCAmelCase : Optional[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_upscale_batch( self ):
"""simple docstring"""
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.dummy_cond_unet_upscale
_UpperCAmelCase : Tuple = DDPMScheduler()
_UpperCAmelCase : Dict = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : str = self.dummy_vae
_UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
_UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : List[str] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : Any = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : int = output.images
assert image.shape[0] == 2
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_upscale_fp16( self ):
"""simple docstring"""
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Any = DDPMScheduler()
_UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase : Tuple = unet.half()
_UpperCAmelCase : Dict = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Dict = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline( self ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase : Tuple = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : str = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "a cat sitting on a park bench"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16( self ):
"""simple docstring"""
_UpperCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase : Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Dict = "a cat sitting on a park bench"
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : int = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : Tuple = "a cat sitting on a park bench"
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 1_0**9
 | 145 | 1 |
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='accelerate command helpers' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 355 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        '''simple docstring'''
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
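        # e.g. with shortest_edge=18 a 40(h) x 30(w) PIL image is expected at
        # height 24, width 18: the short side is pinned and the aspect ratio kept.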
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp( self ):
'''simple docstring'''
lowercase__: Optional[int] = DetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
lowercase__: Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'image_std' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_rescale' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'rescale_factor' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_pad' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
'''simple docstring'''
lowercase__: int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
lowercase__: str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
'''simple docstring'''
# Initialize image_processing
lowercase__: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
lowercase__: Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Tuple = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ , lowercase__: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
lowercase__: Dict = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ):
'''simple docstring'''
# Initialize image_processing
lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
lowercase__: Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__: Union[str, Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch( self ):
'''simple docstring'''
# Initialize image_processing
lowercase__: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase__: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__: Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
lowercase__ , lowercase__: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
'''simple docstring'''
# prepare image and target
lowercase__: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowercase__: Optional[int] = json.loads(f.read() )
lowercase__: Optional[Any] = {'image_id': 39_769, 'annotations': target}
# encode them
lowercase__: Optional[Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowercase__: List[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowercase__: Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__ )
lowercase__: Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase__: List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__ ) )
# verify boxes
lowercase__: Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__ )
lowercase__: int = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase__: List[Any] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__ ) )
# verify is_crowd
lowercase__: Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__ ) )
# verify class_labels
lowercase__: List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__ ) )
# verify orig_size
lowercase__: Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__ ) )
# verify size
lowercase__: Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
'''simple docstring'''
# prepare image, target and masks_path
lowercase__: List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowercase__: Tuple = json.loads(f.read() )
lowercase__: Tuple = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
lowercase__: List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowercase__: Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
lowercase__: Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowercase__: Any = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase__ )
lowercase__: Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
lowercase__: str = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase__ ) )
# verify boxes
lowercase__: Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase__ )
lowercase__: str = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowercase__: Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase__ ) )
# verify is_crowd
lowercase__: List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase__ ) )
# verify class_labels
lowercase__: Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase__ ) )
# verify masks
lowercase__: str = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCAmelCase__ )
# verify orig_size
lowercase__: Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase__ ) )
# verify size
lowercase__: Optional[int] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase__ ) )
| 288 | 0 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    """simple docstring"""
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        '''simple docstring'''
        raise NotImplementedError
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        '''simple docstring'''
        raise NotImplementedError
    def default_hp_space(self, trial):
        '''simple docstring'''
        raise NotImplementedError
    def ensure_available(self):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
    @classmethod
    def pip_install(cls):
        '''simple docstring'''
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = '''optuna'''
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_optuna_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        '''simple docstring'''
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        '''simple docstring'''
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = '''ray'''
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_ray_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        '''simple docstring'''
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        '''simple docstring'''
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = '''sigopt'''
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_sigopt_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        '''simple docstring'''
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        '''simple docstring'''
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """simple docstring"""
    name = '''wandb'''
    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_wandb_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        '''simple docstring'''
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        '''simple docstring'''
        return default_hp_space_wandb(trial)
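# e.g. OptunaBackend.pip_install() returns "`pip install optuna`", while
# RayTuneBackend overrides pip_package and yields "`pip install 'ray[tune]'`".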
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend():
    """simple docstring"""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        """No hyperparameter search backend available.\n"""
        + """\n""".join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
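# Illustrative behaviour (assuming only optuna is installed):
# default_hp_search_backend() returns "optuna"; with no backend installed it
# raises a RuntimeError listing the pip command for every registered backend.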
| 323 |
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
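    # Expected output for the sample graph above:
    #   G->C->A->B->D
    #   G
    # followed by: ValueError: No path from vertex: G to vertex: Foo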
| 323 | 1 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    '''simple docstring'''
    if len(collection) < 2:
        return collection
    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
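# Example (illustrative): circle_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5].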
if __name__ == "__main__":
_snake_case = input('Enter numbers separated by a comma:\n').strip()
_snake_case = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 362 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 324 | 0 |
'''simple docstring'''
from math import pi
def arc_length(radius: float, angle: float) -> float:
    """
    Calculate the length of an arc of a circle, given the radius and the central angle in degrees.
    >>> arc_length(90, 10)
    15.707963267948966
    """
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 254 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    """simple docstring"""
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self) -> float:
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dy) + abs(dx)
        else:
            return sqrt(dy**2 + dx**2)
    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
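    # e.g. a node 3 rows and 4 columns away from the goal scores h = 5.0 under the
    # euclidean heuristic (HEURISTIC == 0, the default) and h = 7 under manhattan.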
class AStar:
    """simple docstring"""
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors
    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class a :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : TPosition , lowerCamelCase : TPosition ) -> None:
__snake_case : str = AStar(lowerCamelCase , lowerCamelCase )
__snake_case : int = AStar(lowerCamelCase , lowerCamelCase )
__snake_case : int = False
def __snake_case ( self : str ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__snake_case : Optional[int] = self.fwd_astar.open_nodes.pop(0 )
__snake_case : str = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase , lowerCamelCase )
self.fwd_astar.closed_nodes.append(lowerCamelCase )
self.bwd_astar.closed_nodes.append(lowerCamelCase )
__snake_case : Optional[Any] = current_bwd_node
__snake_case : Any = current_fwd_node
__snake_case : int = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase )
else:
# retrieve the best current path
__snake_case : Optional[int] = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase )
else:
astar.open_nodes.append(lowerCamelCase )
return [self.fwd_astar.start.pos]
def __snake_case ( self : Any , lowerCamelCase : Node , lowerCamelCase : Node ) -> list[TPosition]:
__snake_case : Optional[int] = self.fwd_astar.retrace_path(lowerCamelCase )
__snake_case : Optional[Any] = self.bwd_astar.retrace_path(lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__snake_case : int = fwd_path + bwd_path
return path
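# Hedged worked example of the join above: retrace_path returns a path that
# ends at the node it was given, so both halves end at the meeting position.
# Popping that duplicate and reversing turns the backward half around:
#
#     fwd_path = [(0, 0), (0, 1), (1, 1)]   # start  -> meeting node
#     bwd_path = [(2, 2), (2, 1), (1, 1)]   # target -> meeting node
#     # after bwd_path.pop() and bwd_path.reverse(): [(2, 1), (2, 2)]
#     # fwd_path + bwd_path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]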
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case : Dict = (0, 0)
_snake_case : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case : List[Any] = time.time()
_snake_case : Dict = AStar(init, goal)
_snake_case : Optional[int] = a_star.search()
_snake_case : Optional[Any] = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_snake_case : List[str] = time.time()
_snake_case : Any = BidirectionalAStar(init, goal)
    # run the bidirectional search so the timing below measures it
    _snake_case : List[Any] = bidir_astar.search()
    _snake_case : List[str] = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 123 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase__ = model_type_to_module_name(A__ )
UpperCAmelCase__ = importlib.import_module(F'''.{module_name}''' , """transformers.models""" )
try:
return getattr(A__ , A__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(A__ , """__name__""" , A__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase__ = importlib.import_module("""transformers""" )
if hasattr(A__ , A__ ):
return getattr(A__ , A__ )
return None
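# Hedged usage sketch for the resolver above (the class name is illustrative):
#
#     cls = image_processor_class_from_name("CLIPImageProcessor")
#     # -> the CLIPImageProcessor class, or None if the name is unknown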
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
UpperCAmelCase__ = get_file_from_repo(
A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(A__ , encoding="""utf-8""" ) as reader:
return json.load(A__ )
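# Hedged usage sketch for the config-fetching helper above (known upstream as
# get_image_processor_config; the checkpoint id is illustrative):
#
#     config = get_image_processor_config("google/vit-base-patch16-224")
#     config.get("image_processor_type")  # e.g. "ViTImageProcessor"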
class lowerCAmelCase_ :
def __init__( self : Union[str, Any] ):
"""simple docstring"""
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , _UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = kwargs.pop("""config""" , _UpperCAmelCase )
UpperCAmelCase__ = kwargs.pop("""trust_remote_code""" , _UpperCAmelCase )
UpperCAmelCase__ = True
UpperCAmelCase__ = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase__ = config_dict.get("""image_processor_type""" , _UpperCAmelCase )
UpperCAmelCase__ = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
UpperCAmelCase__ = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCAmelCase__ = config_dict.pop("""feature_extractor_type""" , _UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
UpperCAmelCase__ = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
UpperCAmelCase__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
UpperCAmelCase__ = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ = AutoConfig.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
            # It could be in `config.image_processor_type`
UpperCAmelCase__ = getattr(_UpperCAmelCase , """image_processor_type""" , _UpperCAmelCase )
if hasattr(_UpperCAmelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
UpperCAmelCase__ = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
UpperCAmelCase__ = image_processor_class_from_name(_UpperCAmelCase )
UpperCAmelCase__ = image_processor_auto_map is not None
UpperCAmelCase__ = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
UpperCAmelCase__ = resolve_trust_remote_code(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if has_remote_code and trust_remote_code:
UpperCAmelCase__ = get_class_from_dynamic_module(
_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase__ = kwargs.pop("""code_revision""" , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
UpperCAmelCase__ = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase , _UpperCAmelCase )
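# Hedged usage sketch for the register hook above (both classes are
# hypothetical stand-ins, not defined in this file):
#
#     AutoImageProcessor.register(MyConfig, MyImageProcessor)
#     processor = AutoImageProcessor.from_pretrained("my-org/my-checkpoint")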
| 350 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = """cvt"""
def __init__( self : List[Any] , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : int=[7, 3, 3] , _UpperCAmelCase : Optional[Any]=[4, 2, 2] , _UpperCAmelCase : List[Any]=[2, 1, 1] , _UpperCAmelCase : Optional[int]=[64, 1_92, 3_84] , _UpperCAmelCase : Any=[1, 3, 6] , _UpperCAmelCase : Tuple=[1, 2, 10] , _UpperCAmelCase : Union[str, Any]=[4.0, 4.0, 4.0] , _UpperCAmelCase : Optional[int]=[0.0, 0.0, 0.0] , _UpperCAmelCase : Dict=[0.0, 0.0, 0.0] , _UpperCAmelCase : Dict=[0.0, 0.0, 0.1] , _UpperCAmelCase : Optional[int]=[True, True, True] , _UpperCAmelCase : Dict=[False, False, True] , _UpperCAmelCase : Dict=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase : int=[3, 3, 3] , _UpperCAmelCase : Optional[int]=[1, 1, 1] , _UpperCAmelCase : List[Any]=[2, 2, 2] , _UpperCAmelCase : Union[str, Any]=[1, 1, 1] , _UpperCAmelCase : str=[1, 1, 1] , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Dict=1E-12 , **_UpperCAmelCase : Any , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = patch_sizes
UpperCAmelCase__ = patch_stride
UpperCAmelCase__ = patch_padding
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = depth
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = attention_drop_rate
UpperCAmelCase__ = drop_rate
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = cls_token
UpperCAmelCase__ = qkv_projection_method
UpperCAmelCase__ = kernel_qkv
UpperCAmelCase__ = padding_kv
UpperCAmelCase__ = stride_kv
UpperCAmelCase__ = padding_q
UpperCAmelCase__ = stride_q
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
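# Hedged usage sketch: instantiating with no arguments keeps the three-stage
# defaults shown in the signature; any stage-wise field can be overridden
# with a same-length list, e.g.
#
#     config = CvtConfig(depth=[1, 4, 16])
#     config.embed_dim   # -> [64, 192, 384]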
| 61 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : List[str] = u
for i in range(1 , lowerCAmelCase ):
__magic_name__ : str = temp * (u - i)
return temp
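# Hedged worked example for ucal above: with u = 0.5 and p = 3 it computes
# u * (u - 1) * (u - 2) = 0.5 * (-0.5) * (-1.5) = 0.375, the numerator of
# the third Newton forward-difference term before division by 3! below.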
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = int(input('enter the numbers of values: ' ) )
__magic_name__ : list[list[float]] = []
for _ in range(lowerCAmelCase ):
y.append([] )
for i in range(lowerCAmelCase ):
for j in range(lowerCAmelCase ):
y[i].append(lowerCAmelCase )
__magic_name__ : Optional[int] = 0
print('enter the values of parameters in a list: ' )
__magic_name__ : Tuple = list(map(lowerCAmelCase , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(lowerCAmelCase ):
__magic_name__ : Dict = float(input() )
__magic_name__ : Tuple = int(input('enter the value to interpolate: ' ) )
__magic_name__ : str = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCAmelCase ):
for j in range(n - i ):
__magic_name__ : List[Any] = y[j + 1][i - 1] - y[j][i - 1]
__magic_name__ : List[str] = y[0][0]
for i in range(1 , lowerCAmelCase ):
summ += (ucal(lowerCAmelCase , lowerCAmelCase ) * y[0][i]) / math.factorial(lowerCAmelCase )
print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
    main()
 | 331 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = config_and_inputs
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
 | 331 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=[1, 2, 1],__SCREAMING_SNAKE_CASE=[2, 2, 4],__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=2.0,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=10,__SCREAMING_SNAKE_CASE=8,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = patch_norm
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = is_training
__lowerCAmelCase = scope
__lowerCAmelCase = use_labels
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = encoder_stride
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : int =(
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a : int =(
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
a : str =False
a : int =False
a : Optional[int] =False
a : Any =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SwinvaModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,embed_dim=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE,nn.Linear ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1],__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
__lowerCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = config.window_size**2
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_heads[0], window_size_squared, window_size_squared],)
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester,"""num_hidden_states_types""" ):
__lowerCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__lowerCAmelCase = 2
self.assertEqual(out_len + added_hidden_states,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ),[self.model_tester.num_heads[0], window_size_squared, window_size_squared],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = getattr(
self.model_tester,"""expected_num_hidden_layers""",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
__lowerCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reshaped_hidden_states[0].shape
__lowerCAmelCase = (
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,(padded_height, padded_width) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
| 363 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : str =KandinskyVaaInpaintPipeline
a : int =["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a : str =[
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a : Optional[int] =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Dict =False
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 1_00
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts a mean and a variance per channel
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
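    # Hedged note on the channel counts above: out_channels = 8 is twice the
    # 4 latent channels because the UNet predicts a mean and a variance per
    # channel, while in_channels = 9 matches the usual inpainting layout of
    # 4 latent + 4 masked-image latent + 1 mask channel (that 4 + 4 + 1 split
    # is an assumption, not stated in this file).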
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = DDIMScheduler(
num_train_timesteps=10_00,beta_schedule="""linear""",beta_start=0.0_0085,beta_end=0.012,clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,steps_offset=1,prediction_type="""epsilon""",thresholding=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__lowerCAmelCase = np.ones((64, 64),dtype=np.floataa )
__lowerCAmelCase = 0
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = np.ones((7_68, 7_68),dtype=np.floataa )
__lowerCAmelCase = 0
__lowerCAmelCase = """a hat"""
__lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""",torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple()
__lowerCAmelCase = pipeline(
image=__SCREAMING_SNAKE_CASE,mask_image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,output_type="""np""",)
__lowerCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 46 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return getitem, k
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return setitem, k, v
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return delitem, k
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ ):
'''simple docstring'''
try:
return fun(lowerCAmelCase__ , *lowerCAmelCase__ ), None
except Exception as e:
return None, e
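# Hedged usage sketch for _run_operation above: it returns (result, None) on
# success and (None, exception) on failure, e.g.
#
#     _run_operation({"k": 1}, getitem, "k")   # -> (1, None)
#     _run_operation({}, getitem, "missing")   # -> (None, KeyError('missing'))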
lowercase__ :Dict = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
lowercase__ :Optional[int] = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
lowercase__ :Optional[Any] = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
lowercase__ :Dict = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
lowercase__ :Dict = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowercase__ :List[str] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = HashMap(initial_block_size=4 )
lowercase = {}
for _, (fun, *args) in enumerate(lowerCAmelCase__ ):
lowercase , lowercase = _run_operation(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ )
lowercase , lowercase = _run_operation(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ )
assert my_res == py_res
assert str(lowerCAmelCase__ ) == str(lowerCAmelCase__ )
assert set(lowerCAmelCase__ ) == set(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
assert set(my.items() ) == set(py.items() )
def UpperCamelCase ( ):
'''simple docstring'''
def is_public(lowerCAmelCase__ ) -> bool:
return not name.startswith('''_''' )
lowercase = {name for name in dir({} ) if is_public(lowerCAmelCase__ )}
lowercase = {name for name in dir(HashMap() ) if is_public(lowerCAmelCase__ )}
assert dict_public_names > hash_public_names
| 101 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
lowercase = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
lowercase = model.state_dict()
def to_tf_var_name(lowerCAmelCase__ ):
for patt, repl in iter(lowerCAmelCase__ ):
lowercase = name.replace(lowerCAmelCase__ , lowerCAmelCase__ )
return f'bert/{name}'
def create_tf_var(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = tf.dtypes.as_dtype(tensor.dtype )
lowercase = tf.get_variable(dtype=lowerCAmelCase__ , shape=tensor.shape , name=lowerCAmelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowerCAmelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowercase = to_tf_var_name(lowerCAmelCase__ )
lowercase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowercase = torch_tensor.T
lowercase = create_tf_var(tensor=lowerCAmelCase__ , name=lowerCAmelCase__ , session=lowerCAmelCase__ )
tf.keras.backend.set_value(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = session.run(lowerCAmelCase__ )
print(f'Successfully created {tf_name}: {np.allclose(lowerCAmelCase__ , lowerCAmelCase__ )}' )
lowercase = tf.train.Saver(tf.trainable_variables() )
saver.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
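# Hedged usage sketch for the converter above (paths are illustrative):
#
#     model = BertModel.from_pretrained("bert-base-uncased")
#     convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir="/tmp/tf", model_name="bert-base-uncased")
#     # -> writes /tmp/tf/bert_base_uncased.ckpt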
def UpperCamelCase ( lowerCAmelCase__=None ):
'''simple docstring'''
lowercase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory in which to save tensorflow model''' )
lowercase = parser.parse_args(lowerCAmelCase__ )
lowercase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 101 | 1 |
import os
import pytest
from attr import dataclass
lowercase__ : int = "us-east-1" # defaults region
@dataclass
class a__ :
a : str
a : int = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
a : str = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5500,
}
a : Dict = {**hyperparameters, """max_steps""": 1000}
@property
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
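        # Hedged worked example: against a log line "train_runtime = 12.34",
        # the regex r"train_runtime.*=\D*(.*?)$" captures "12.34".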
@property
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
return F'''{self.framework}-transfromers-test'''
@property
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> Tuple:
a = SageMakerTestEnvironment(framework=request.cls.framework)
| 180 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Dict = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
a = k.replace(__UpperCamelCase , __UpperCamelCase)
if k.startswith("encoder"):
a = k.replace(".attn" , ".self_attn")
a = k.replace("norm1" , "self_attn_layer_norm")
a = k.replace("norm2" , "final_layer_norm")
elif k.startswith("decoder"):
a = k.replace("norm1" , "self_attn_layer_norm")
a = k.replace("norm2" , "encoder_attn_layer_norm")
a = k.replace("norm3" , "final_layer_norm")
return k
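# Hedged worked example for the renaming above (the PATTERNS loop unpacks
# parlai_name/hf_name as written):
#
#     rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#     # -> "encoder.layers.0.self_attn.q_proj.weight"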
def rename_layernorm_keys(sd: dict) -> None:
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config_json_path: str) -> None:
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []  # keys that could not be mapped, kept for debugging
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
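# Example invocation (file names are the argparse defaults above; the script
# name itself is illustrative):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json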
| 180 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
# The original model-specific class name is not recoverable from this snippet,
# so a generic name is used; the structure follows transformers-style image
# processors.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], keeping aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        # Per-call arguments override the defaults set in __init__.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
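# Usage sketch (illustrative, assuming the generic class name above; the
# __call__ of BaseImageProcessor dispatches to preprocess):
#   from PIL import Image
#   processor = ImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   # inputs["pixel_values"] has shape (1, 3, 224, 224) after resize + center crop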
| 217 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # Field names follow `datasets.DownloadConfig`, which this snippet mirrors.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
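# Usage sketch (paths illustrative):
#   config = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3)
#   fresh = config.copy()  # deep copy; mutating `fresh` leaves `config` untouched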
| 217 | 1 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # Each expected MST edge must appear in the result in either orientation.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
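# Sanity check on the expected MST above: its total weight is
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the minimum for this 9-node graph.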
| 29 |
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """
    >>> greatest_common_divisor(4, 8)
    4
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
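# e.g. greatest_common_divisor(7, 36) == 1, which is exactly what HillCipher's
# determinant check below requires of a key matrix modulo 36.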
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))
    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        # Keep only characters in the key alphabet, then pad to a multiple of break_key.
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Find the multiplicative inverse of the determinant mod 36.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
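# Programmatic usage sketch (not part of the original script; the 2x2 key
# below has determinant 7, coprime with 36, so it passes check_determinant):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   ciphertext = hc.encrypt("testing hill cipher")
#   round_trip = hc.decrypt(ciphertext)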
| 29 | 1 |