Dataset schema:

| column | type | value range |
|---|---|---|
| code | string | 82 – 54.1k characters |
| code_codestyle | int64 | 0 – 699 |
| style_context | string | 111 – 35.6k characters |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Compute the circular convolution of two discrete signals."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fill the smaller signal with zeros so both signals have the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # build the circulant matrix from rotations of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # round off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
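# Hedged usage sketch (class and method names follow the reconstruction above):
# the circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4] is [10, 10, 6, 14].
if __name__ == "__main__":
    convolver = CircularConvolution()
    print(convolver.circular_convolution())  # [10.0, 10.0, 6.0, 14.0]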
# ---- dataset-viewer row metadata: 81 ----
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
# ---- dataset-viewer row metadata: 35 | 0 ----
"""simple docstring"""
from math import factorial, radians
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 18 , lowerCAmelCase__ = 10 ):
UpperCAmelCase_ = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
UpperCAmelCase_ = radians(lowerCAmelCase__ )
UpperCAmelCase_ = angle_in_radians
UpperCAmelCase_ = 3
UpperCAmelCase_ = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
UpperCAmelCase_ = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("""doctest""").testmod()
# ---- dataset-viewer row metadata: 82 ----
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: unit number of the flatten layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expand the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand three-dimensional data into a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix into a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv1 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv1)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    k_data = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(k_data, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the summed error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of the image after the convolution process so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
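# Hedged usage sketch (constructor argument order per the reconstruction above:
# conv1_get=[kernel_size, kernel_count, step], size_p1, bp_num1, bp_num2, bp_num3;
# the shapes below are hypothetical). For 21x21 inputs, a 7x7 kernel with step 2
# gives 8x8 feature maps; pooling by 2 gives 4x4, so 4 kernels flatten to 64 units:
#
#     cnn = CNN([7, 4, 2], 2, 64, 8, 2)
#     # cnn.train(patterns, train_images, teach_labels, n_repeat=50, error_accuracy=0.1)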
# ---- dataset-viewer row metadata: 35 | 0 ----
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
snake_case__ : torch.FloatTensor
snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
snake_case__ : int = 1
@register_to_config
def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = sigma_max
# setable values
_lowerCamelCase : Dict = None
self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
"""simple docstring"""
return sample
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
_lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_lowerCamelCase : Tuple = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
_lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
_lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
_lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
_lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCamelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
_lowerCamelCase : int = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCamelCase : List[str] = randn_tensor(
sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
_lowerCamelCase : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCamelCase : int = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCamelCase : Union[str, Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_lowerCamelCase : str = step_size.unsqueeze(-1 )
_lowerCamelCase : Any = sample + step_size * model_output
_lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
"""simple docstring"""
_lowerCamelCase : Dict = timesteps.to(original_samples.device )
_lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
_lowerCamelCase : Union[str, Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
)
_lowerCamelCase : int = noise + original_samples
return noisy_samples
def __len__( self : Optional[int] ):
"""simple docstring"""
return self.config.num_train_timesteps
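# Hedged sketch of the predictor-corrector sampling loop this scheduler is built
# for (names follow the reconstruction above; `model` is a hypothetical score
# network, so this is illustrative, not an authoritative pipeline):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=1000)
#     scheduler.set_sigmas(num_inference_steps=1000)
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             score = model(sample, t).sample
#             sample = scheduler.step_correct(score, sample).prev_sample
#         score = model(sample, t).sample
#         output = scheduler.step_pred(score, t, sample)
#         sample = output.prev_sample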
# ---- dataset-viewer row metadata: 83 ----
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
# ---- dataset-viewer row metadata: 35 | 0 ----
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
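# Hedged usage note: `params=locals()` forwards each function's own arguments
# (`q`, or `lat`/`lon`, plus `appid`) as the request's query string, so
#
#     current_weather(q="Chicago", appid="<your-appid>")
#
# requests https://api.openweathermap.org/data/2.5/weather?q=Chicago&appid=<your-appid>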
# ---- dataset-viewer row metadata: 84 ----
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
# ---- dataset-viewer row metadata: 35 | 0 ----
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
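# Hedged usage note on the `_LazyModule` pattern above: importing the package
# stays cheap because submodules load only on first attribute access, e.g.
#
#     from transformers import DPTConfig              # resolves lazily
#     from transformers import DPTForDepthEstimation  # triggers the torch-backed import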
# ---- dataset-viewer row metadata: 85 ----
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given two of voltage, current, and resistance (the unknown
    one passed as 0), return a name/value pair for the missing quantity."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
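# Hedged examples (function name per the reconstruction above):
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}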
# ---- dataset-viewer row metadata: 35 | 0 ----
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
# ---- dataset-viewer row metadata: 86 ----
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json",
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json",
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt",
        "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json",
        "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json",
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
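# Hedged usage sketch for the reader tokenizer above (the checkpoint ids are
# real DPR checkpoints; the call flow follows the reconstructed API):
#
#     from transformers import DPRReader, DPRReaderTokenizer
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded)
#     spans = tokenizer.decode_best_spans(encoded, outputs)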
# ---- dataset-viewer row metadata: 35 | 0 ----
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts (nodes, xpaths) pairs from one or more HTML strings."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
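# Hedged usage sketch (the public class ships as
# `transformers.MarkupLMFeatureExtractor`; output values assume the
# reconstruction above):
#
#     from transformers import MarkupLMFeatureExtractor
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     print(encoding["nodes"])   # [['Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/p']]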
# ---- dataset-viewer row metadata: 87 ----
import random
def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Cheap pre-check against the primes below 1000 before falling back to rabin_miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime number with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
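# Quick sanity check of the helpers above (illustrative sketch; assumes the
# definitions in this file are in scope): compare against naive trial division.
def naive_is_prime(n: int) -> bool:
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True


# rabin_miller is probabilistic, but with 5 random witnesses a disagreement on
# this small range is astronomically unlikely.
assert all(is_prime_low_num(n) == naive_is_prime(n) for n in range(2, 2000))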
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rf'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
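# A worked single step of the update rule theta <- theta - alpha * x.T @ (h - y) / m
# (illustrative sketch on a tiny hand-made dataset; the values are arbitrary):
x_toy = np.array([[1.0, 2.0], [1.0, -1.0]])  # two samples, two features
y_toy = np.array([1.0, 0.0])
theta_toy = np.zeros(2)
z_toy = x_toy @ theta_toy                           # [0, 0]
h_toy = 1 / (1 + np.exp(-z_toy))                    # sigmoid -> [0.5, 0.5]
grad_toy = x_toy.T @ (h_toy - y_toy) / y_toy.size   # [0.0, -0.75]
theta_toy = theta_toy - 0.1 * grad_toy
assert np.allclose(theta_toy, [0.0, 0.075])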
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE : int = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy'
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('''env''')
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }

        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=A , default=A , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=A , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=A , default=A , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=A , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=A , )
parser.add_argument(
'''--config_name''' , type=A , default=A , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=A , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=A , default=A , help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='''pt''').to(model.device)

        summary_ids = model.generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            }, example_outputs=summary_ids, )

        logger.info('''Model exported to {}'''.format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams),
                '''max_length''': np.array(max_length),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info('''Model outputs from torch and ONNX Runtime are similar.''')
        logger.info('''Success.''')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)

    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''')

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''

    logger.info('''Exporting model to ONNX''')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
    main()
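# Hypothetical invocation (flags mirror parse_args above; the script name and
# paths are placeholders, not taken from the original file):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --device cpu --num_beams 4 --max_length 5 --output_file_path BART.onnx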
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('''Building PyTorch model from configuration: {}'''.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
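# Hypothetical invocation (paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin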
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) as the integral of x^(num - 1) * e^(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError('math domain error' )
    return quad(integrand, 0, inf, args=(num) )[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
    testmod()
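    # Since Gamma(n) = (n - 1)! for positive integers, a quick numeric check of
    # the quadrature-based gamma above (illustrative sketch; loose tolerances):
    assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)  # Gamma(5) = 4! = 24
    assert math.isclose(gamma(0.5), math.sqrt(math.pi), rel_tol=1e-6)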
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
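# Worked example of the formula Recall = TP / (TP + FN) on the Example 1 data
# from the docstring above (illustrative sketch):
#   references  = [0, 0, 1, 1, 1]
#   predictions = [0, 1, 0, 1, 1]
#   TP = 2 (last two positives found), FN = 1 (third position missed)
#   recall = 2 / (2 + 1) = 0.666...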
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0      0   0    1  1  1  1  1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
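# Worked illustration of the subword label alignment above (sketch, using a
# hypothetical tokenizer that splits "Washington" into three subwords):
#   words  = ["in", "Washington"]        labels = ["O", "B-LOC"]
#   tokens = ["in", "Wash", "##ing", "##ton"]
#   label_ids (before special tokens) = [label_map["O"], label_map["B-LOC"], -100, -100]
# Only the first subword of each word keeps a real label; the -100 padding id
# (nn.CrossEntropyLoss().ignore_index) is skipped by the loss.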
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['''xlnet''']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == '''left'''), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(f"""Saving features into cached file {cached_features_file}""")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['''xlnet''']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == '''left'''), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64), (
                        {'''input_ids''': tf.TensorShape([None]), '''attention_mask''': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ), )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64), (
                        {
                            '''input_ids''': tf.TensorShape([None]),
                            '''attention_mask''': tf.TensorShape([None]),
                            '''token_type_ids''': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ), )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Constructs an InstructBLIP processor which wraps an image processor, a tokenizer and a Q-Former tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids' )
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask' )

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
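# Hedged usage sketch (the checkpoint name, image and prompt are placeholders,
# and the repo must ship an image processor, tokenizer and qformer_tokenizer):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
#   # `inputs` then holds pixel_values, input_ids/attention_mask, and the
#   # qformer_input_ids/qformer_attention_mask produced above.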
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of the
    grid, moving only right and down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )

    return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
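# Worked 2x2 illustration of the recurrence dp[i][j] = grid[i][j] + min(up, left):
#   grid = [[1, 3],
#           [2, 9]]
#   dp   = [[1, 4],
#           [3, 12]]   # dp[1][1] = 9 + min(dp[0][1]=4, dp[1][0]=3) = 12
# so the cheapest right/down path 1 -> 2 -> 9 costs 12.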
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True)
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 5_0265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True)
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
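# Numeric check (illustrative): for a 2x2 grid there are C(4, 2) = 6 paths, and
# for the default n = 20 the answer is C(40, 20) = 137846528820.
#   solution(2)  == 6
#   solution(20) == 137846528820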
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"""Node: key: {self.key}, val: {self.val}, """
            f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add the given node at the end of the list (just before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Remove and return the given node from the list, or None if it is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache that stores up to `capacity` key/value pairs."""

    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
            f"""capacity={self.capacity}, current size={self.num_keys})"""
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for `key` and mark it as most recently used."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Store `value` under `key`, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
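# Usage sketch of the decorator above (illustrative; the cache memoizes on the
# first positional argument, so repeated Fibonacci subproblems hit the cache):
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


if __name__ == "__main__":
    print(fib(30))           # 832040
    print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)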
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    """Return the md5 hex digest of an image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ],
            outputs,
        )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
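    # Added illustration of `expand_attention_types_params` above: the default
    # attention_types=[[["global", "local"], 12]] expands to a 24-entry layer
    # pattern ["global", "local", "global", "local", ...], matching num_layers=24.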
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
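# Added sanity check (not in the original file): on a simple example,
# `custom_unfold` should reproduce torch.Tensor.unfold exactly.
def _check_custom_unfold() -> None:
    import torch

    t = torch.arange(6)
    expected = t.unfold(0, 3, 2)  # tensor([[0, 1, 2], [2, 3, 4]])
    assert torch.equal(custom_unfold(t, 0, 3, 2), expected)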
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-size computation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
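# Added illustration: with seq_length=12 and window_size=8, the candidates are
# 1..7, the divisors of 12 among them are {1, 2, 3, 4, 6}, so the call returns
# (block_length=6, num_blocks=2).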
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 96 |
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a positive integer."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
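# Worked example for the helper above (added illustration):
# 25 == 0b11001 has three set bits, so get_set_bits_count(25) == 3.
assert get_set_bits_count(25) == 3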
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
from jiwer import compute_measures
import datasets
__a = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
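# Added illustration of the WER formula documented above; this helper is a
# sketch (not part of the original metric) and relies only on `jiwer`:
# _wer_by_hand("this is the prediction", "this is the reference") == 0.25
# (one substitution over four reference words).
def _wer_by_hand(prediction: str, reference: str) -> float:
    measures = compute_measures(reference, prediction)
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    num_reference_words = measures["substitutions"] + measures["deletions"] + measures["hits"]
    return errors / num_reference_words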
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 97 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size, keep_aspect_ratio: bool, multiple: int) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
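# Added worked example of the sizing rule above: for a 480x640 input, a 384x384
# target, keep_aspect_ratio=True and multiple=32, the height factor 384/480 = 0.8
# is closer to 1 than the width factor 384/640 = 0.6, so both sides are scaled by
# 0.8 and rounded to multiples of 32, giving (new_height, new_width) = (384, 512).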
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
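    # Added usage sketch (hypothetical values) for the post-processing above:
    #   image_processor = DPTImageProcessor()
    #   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
    #   maps[0].shape  # torch.Size([480, 640]) -- one class id per pixel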
| 35 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. """
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
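    # Added illustration of the activation parsing in __init__ above:
    # feed_forward_proj="gated-gelu" splits into ["gated", "gelu"], so
    # is_gated_act=True and dense_act_fn is remapped to "gelu_new";
    # feed_forward_proj="relu" yields is_gated_act=False and dense_act_fn="relu".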
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13
    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 98 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    '''simple docstring'''
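# Added usage sketch (hypothetical values) for the Graph class above:
#   g = Graph(4)
#   for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
#       g.add_edge(u, v, w)
#   g.boruvka()  # prints the added edges and a total MST weight of 19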
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
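    # Added note on the arithmetic above: under a typical 2-node x 4-GPU launch
    # the launcher would export WORLD_SIZE=8, N_GPU_NODE=4, N_NODES=2, RANK in
    # 0..7 and NODE_RANK in 0..1, so n_nodes = 8 // 4 = 2 and node_id = RANK // 4.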
def set_seed(args):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 99 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_A : int = get_tests_dir("""fixtures""")
class FeatureExtractorUtilTester(unittest.TestCase):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(_A)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(_A)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(_A)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 100 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check that the data has an even number of hex digits
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
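# Added round-trip check for the two helpers above (not in the original module):
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"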
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ : Union[str, Any] =parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
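# Example invocation of the converter above (all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --rembert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin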
| 101 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )
| 35 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
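# Added usage sketch (toy data) for TFKMeansCluster above, assuming a TF 1.x runtime:
#   data = array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.8, 8.1]])
#   centroids, assignments = TFKMeansCluster(data, noofclusters=2)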
| 103 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , ) -> Union[str, Any]:
super().__init__()
A__ = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = False
A__ = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
A__ = TaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , d_model=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ , feed_forward_proj=SCREAMING_SNAKE_CASE__ , is_decoder=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , )
A__ = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE__ ):
A__ = TaBlock(SCREAMING_SNAKE_CASE__ )
self.encoders.append(SCREAMING_SNAKE_CASE__ )
A__ = TaLayerNorm(SCREAMING_SNAKE_CASE__ )
A__ = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
A__ = self.token_embedder(SCREAMING_SNAKE_CASE__ )
A__ = encoder_input_tokens.shape[1]
A__ = torch.arange(SCREAMING_SNAKE_CASE__ , device=encoder_input_tokens.device )
x += self.position_encoding(SCREAMING_SNAKE_CASE__ )
A__ = self.dropout_pre(SCREAMING_SNAKE_CASE__ )
# inverted the attention mask
A__ = encoder_input_tokens.size()
A__ = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for lyr in self.encoders:
A__ = lyr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
A__ = self.layer_norm(SCREAMING_SNAKE_CASE__ )
return self.dropout_post(SCREAMING_SNAKE_CASE__ ), encoder_inputs_mask
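# Minimal usage sketch (the config values below are illustrative assumptions, not from this file):
# encoder = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#                                   num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#                                   feed_forward_proj="gated-gelu")
# tokens = torch.randint(0, 1536, (1, 2048))
# mask = torch.ones(1, 2048, dtype=torch.bool)
# hidden, mask = encoder(tokens, mask)  # hidden: (1, 2048, 768)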
| 104 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel size, number of kernels, convolution step]
        :param size_p1: pooling size
        :param bp_num1: units number of the flatten layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp_num1 = model_dic.get("num_bp1")
        bp_num2 = model_dic.get("num_bp2")
        bp_num3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
        # overwrite its parameters with the saved weights and thresholds
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        # logistic activation: squashes any input into (0, 1)
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expand each data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
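    # e.g. with a 28x28 input, a 5x5 kernel and conv_step=1, each feature map is
    # (28 - 5) / 1 + 1 = 24 on a side, i.e. 24x24 (an illustrative shape, not fixed by this class)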
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expanding three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # up-sample the pooled gradients back to feature-map size and apply the
        # sigmoid derivative out * (1 - out) of each feature map
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # accumulate the summed error over every single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the image data after the convolution process so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
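# Minimal usage sketch (all shapes below are illustrative assumptions, not fixed by the class):
# cnn = CNN(conv1_get=[5, 3, 1], size_p1=2, bp_num1=432, bp_num2=30, bp_num3=10)
# a 28x28 input -> three 24x24 feature maps -> pooled to 12x12 -> flattened to 3 * 12 * 12 = 432
# cnn.train(patterns=..., datas_train=..., datas_teach=..., n_repeat=100, error_accuracy=0.5)
# predictions = cnn.predict(datas_test=...)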
if __name__ == "__main__":
pass
| 35 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(result) == sorted(expected)
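# Sanity check: the expected minimum spanning tree has total weight
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37 for this classic 9-vertex example graph.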
| 105 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create a hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append the new token to input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select a random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that the outputs are equal on the slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
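# These tests are collected by the standard transformers test runner, e.g.
#   pytest tests/models/trocr/test_modeling_trocr.py -k "decoder"
# (path per the usual transformers repository layout).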
| 35 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
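# Example invocation (the script filename and model names below are illustrative placeholders):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2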
if __name__ == "__main__":
    main()
| 106 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
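    # Note: with do_lower_case=True the basic tokenizer lower-cases and strips accents,
    # so "UNwant\u00E9d" becomes "unwanted" before the WordPiece split checked above.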
def lowercase__ ( self : str ):
pass
| 35 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1,
        # so swap any coordinate pair the random draw produced in the wrong order.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 107 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law on any two given electrical values (voltage, current, resistance)
    and return a name/value pair for the zero-valued one in a Python dict.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    >>> ohms_law(voltage=0, current=5, resistance=2)
    {'voltage': 10.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
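# Example: ohms_law(voltage=10, current=0, resistance=5) returns {"current": 2.0},
# since I = V / R = 10 / 5.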
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312  # assumes a file-serving peer is already listening on this host/port

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
| 108 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        # score every candidate span, sort by score, then greedily keep non-overlapping spans
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
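# Minimal usage sketch (checkpoint name as published above; the inputs are illustrative):
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions="Who wrote Hamlet?",
#     titles="Hamlet",
#     texts="Hamlet is a tragedy written by William Shakespeare.",
#     return_tensors="pt",
# )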
| 35 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i . b_j + ||b_j||^2, computed for all pairs at once
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # assign each RGB pixel to its nearest color cluster
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
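# Sanity check of the pairwise-distance identity (illustrative only):
# a, b = np.random.rand(4, 3), np.random.rand(5, 3)
# d = squared_euclidean_distance(a, b)  # shape (4, 5); d[i, j] == ||a[i] - b[j]||**2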
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # map pixel values from [0, 255] to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 109 |
import random
def rabin_miller(num: int) -> bool:
    # write num - 1 as s * 2^t with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    # 5 rounds of the Miller-Rabin probabilistic primality test
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
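# Example: rabin_miller(561) is False with overwhelming probability; 561 = 3 * 11 * 17 is a
# Carmichael number that fools the simpler Fermat test but not Miller-Rabin.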
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime number that is keysize bits in size."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print('Prime number:', num)
    print('is_prime_low_num:', is_prime_low_num(num))
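    # Minimal hypothetical smoke test: Miller-Rabin never rejects a true prime,
    # and composites with a factor below 1000 are caught deterministically by the
    # trial-division step above, so these asserts are stable.
    assert all(is_prime_low_num(p) for p in (2, 101, 997, 104729))
    assert not any(is_prime_low_num(c) for c in (1, 100, 561))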
| 35 | 0 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording in ``cuts``
    every node whose subtree has an even number of vertices."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    """Find the edges that can be removed so every remaining component is even-sized."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
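    # Hypothetical sanity check: for the hard-coded tree above, cutting edges
    # (1, 3) and (1, 6) leaves only even-sized components, so the answer is 2.
    assert len(cuts) - 1 == 2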
| 110 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic (sigmoid) activation."""
    return 1 / (1 + np.exp(-z ))
def cost_function(h, y):
    """Binary cross-entropy loss."""
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood(x, y, weights):
    """Log-likelihood of the data under a logistic model with the given weights."""
    scores = np.dot(x, weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic regression weights by batch gradient descent."""
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x, theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T, h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta )
        h = sigmoid_function(z )
        j = cost_function(h, y )
        if iterations % 100 == 0:
            print(f"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('theta: ', theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        """Probability of class 1 under the fitted logistic regression model."""
        return sigmoid_function(
            np.dot(x, theta) )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
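    # A hypothetical follow-up: threshold the predicted probabilities at 0.5 to
    # obtain hard class labels and report training accuracy on the same data.
    predicted_labels = (predict_prob(x) >= 0.5).astype(int)
    print('training accuracy:', (predicted_labels == y).mean())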
| 35 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed) ).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , 'half' ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()
    @unittest.skip('non-deterministic pipeline' )
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()
    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
        init_image = init_image.resize((512, 512) )
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision='fp16' )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='np' , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
        init_image = init_image.resize((512, 512) )
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='np' , )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 142 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info))
return info
@staticmethod
    def format_dict(d: dict):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
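# Typical invocation once diffusers is installed: the console entry point is
# `diffusers-cli`, so running `diffusers-cli env` prints the version table above.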
| 35 | 0 |
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort"); only valid for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    for _ in range(len(sequence) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 238 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json configuration
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
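# Example invocation (the paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin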
| 35 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 5_12,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BlenderbotSmall tokenizer, backed by the tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
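# Minimal hypothetical usage with the checkpoint referenced above:
# tokenizer = BlenderbotSmallTokenizerFast.from_pretrained('facebook/blenderbot_small-90M')
# print(tokenizer('sam').input_ids)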
| 144 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to \'warn\'.\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score) if score.size == 1 else score}
| 35 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='accelerate command helpers' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
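# With accelerate installed, this module backs the `accelerate` console script,
# e.g. `accelerate config`, `accelerate env`, or `accelerate launch train.py`.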
| 27 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError
    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_seq_length: int , tokenizer: PreTrainedTokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info('Writing example %d of %d' , ex_index , len(examples) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info('*** Example ***' )
                logger.info('guid: %s' , example.guid )
                logger.info('tokens: %s' , ' '.join([str(x) for x in tokens] ) )
                logger.info('input_ids: %s' , ' '.join([str(x) for x in input_ids] ) )
                logger.info('input_mask: %s' , ' '.join([str(x) for x in input_mask] ) )
                logger.info('segment_ids: %s' , ' '.join([str(x) for x in segment_ids] ) )
                logger.info('label_ids: %s' , ' '.join([str(x) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    features: List[InputFeatures]
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
    # Use the cross entropy ignore_index as padding label id so that only real label ids contribute to the loss.
    def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                examples = token_classification_task.read_examples_from_file(data_dir , mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(f"""Saving features into cached file {cached_features_file}""" )
                torch.save(self.features , cached_features_file)
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    features: List[InputFeatures]
    pad_token_label_id: int = -100
    # Use the cross entropy ignore_index as padding label id so that only real label ids contribute to the loss.
    def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
        examples = token_classification_task.read_examples_from_file(data_dir , mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
        def gen():
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )
        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64) , (
                    {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64) , (
                    {
                        'input_ids': tf.TensorShape([None] ),
                        'attention_mask': tf.TensorShape([None] ),
                        'token_type_ids': tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def get_dataset(self):
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features) ) )
        return self.dataset
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
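# Hypothetical usage sketch (a concrete NER task object and label list are assumed):
# dataset = TokenClassificationDataset(
#     token_classification_task=ner_task, data_dir='./data', tokenizer=tokenizer,
#     labels=['O', 'B-PER', 'I-PER'], model_type='bert', max_seq_length=128,
# )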
| 35 | 0 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "", ) -> bool:
    """True if some permutation of ``input_str`` (ignoring case and spaces) is a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as above, written out with an explicit character-frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(""" """ , """""" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations with ``timeit``."""
    print("""\nFor string = """ , input_str , """:""" )
    print(
        """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(input_str) , """\ttime =""" , timeit(
            """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
    print(
        """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(input_str) , """\ttime =""" , timeit(
            """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
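    # Minimal hypothetical self-checks: the two implementations must agree.
    assert can_string_be_rearranged_as_palindrome_counter('Momo')
    assert not can_string_be_rearranged_as_palindrome('abcb')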
| 637 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Minimal path sum from top-left to bottom-right of the grid, moving only right and down."""
    with open(os.path.join(os.path.dirname(__file__) , filename) ) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
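# The recurrence dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]) fills each
# cell exactly once, so the search is O(n^2) for an n x n matrix. As a toy check:
# for [[1, 2], [4, 1]] the minimal path sum is 1 + 2 + 1 = 4.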
| 35 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCAmelCase__: List[str] = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 345 |
from math import factorial
def solution(n: int = 20) -> int:
    """Number of monotone lattice paths through an n x n grid: C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
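# Hypothetical sanity check: a 2 x 2 grid has C(4, 2) = 6 monotone lattice paths.
assert solution(2) == 6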
| 35 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f""".{module_name}""" , "transformers.models" )
            try:
                return getattr(module , class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , "__name__" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name)
    return None
def get_feature_extractor_config( pretrained_model_name_or_path , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file , encoding="utf-8" ) as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config" , None)
        trust_remote_code = kwargs.pop("trust_remote_code" , None)
        kwargs["_from_auto"] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type" , None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , "feature_extractor_type" , None)
            if hasattr(config , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("code_revision" , None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict , **kwargs)
        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class)
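# Typical usage (the model id is illustrative):
# feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')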
| 524 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    """Stable MD5 fingerprint of an image's raw bytes."""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 35 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """
    Stable Diffusion pipeline that keeps images generated from the same seed
    visually similar across different output resolutions.
    """
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> Union[str, Any]:
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ) -> Optional[int]:
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_lowercase )}.' )
# get prompt text embeddings
snake_case_ : Any = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case_ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
snake_case_ : int = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
snake_case_ : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case_ : Optional[Any] = text_embeddings.shape
snake_case_ : Any = text_embeddings.repeat(1 , _lowercase , 1 )
snake_case_ : Tuple = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case_ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ : List[str]
if negative_prompt is None:
snake_case_ : Any = ['''''']
elif type(_lowercase ) is not type(_lowercase ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !='
f' {type(_lowercase )}.' )
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[str] = [negative_prompt]
elif batch_size != len(_lowercase ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
snake_case_ : Any = negative_prompt
snake_case_ : Tuple = text_input_ids.shape[-1]
snake_case_ : List[Any] = self.tokenizer(
_lowercase , padding="""max_length""" , max_length=_lowercase , truncation=_lowercase , return_tensors="""pt""" , )
snake_case_ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : int = uncond_embeddings.shape[1]
snake_case_ : List[Any] = uncond_embeddings.repeat(_lowercase , _lowercase , 1 )
snake_case_ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
snake_case_ : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case_ : str = torch.randn(
_lowercase , generator=_lowercase , device="""cpu""" , dtype=_lowercase ).to(self.device )
snake_case_ : List[str] = torch.randn(_lowercase , generator=_lowercase , device="""cpu""" , dtype=_lowercase ).to(
self.device )
else:
snake_case_ : str = torch.randn(
_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
snake_case_ : Optional[int] = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
snake_case_ : Optional[int] = latents_reference.to(self.device )
snake_case_ : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
snake_case_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
snake_case_ : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
snake_case_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
snake_case_ : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
snake_case_ : str = 0 if dx < 0 else dx
snake_case_ : int = 0 if dy < 0 else dy
snake_case_ : Any = max(-dx , 0 )
snake_case_ : Optional[int] = max(-dy , 0 )
snake_case_ : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case_ : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ : Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ : Any = {}
if accepts_eta:
snake_case_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : str = self.scheduler.scale_model_input(_lowercase , _lowercase )
# predict the noise residual
snake_case_ : Optional[Any] = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case_ : List[Any] = noise_pred.chunk(2 )
snake_case_ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : List[Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
snake_case_ : Dict = 1 / 0.1_8215 * latents
snake_case_ : Dict = self.vae.decode(_lowercase ).sample
snake_case_ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
snake_case_ : Any = self.feature_extractor(self.numpy_to_pil(_lowercase ) , return_tensors="""pt""" ).to(
self.device )
snake_case_ : Any = self.safety_checker(
images=_lowercase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
snake_case_ : Optional[int] = None
if output_type == "pil":
snake_case_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
| 58 |
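The guidance step inside the denoising loop above (`noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`) is the whole of classifier-free guidance. A minimal numeric sketch, with hand-picked tensors standing in for the two halves of the real UNet's `noise_pred.chunk(2)`:

import torch

# stand-ins for the unconditional / text-conditioned noise predictions
noise_pred_uncond = torch.tensor([0.10, -0.20, 0.05])
noise_pred_text = torch.tensor([0.30, -0.10, 0.25])
guidance_scale = 7.5

# w = 1 recovers the conditional prediction; w > 1 pushes the sample
# further toward the prompt and away from the unconditional prediction
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([1.6000, 0.5500, 1.5500])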
def a ( A__ ) -> int:
    '''simple docstring'''
    if not isinstance(A__ , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    elif A__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
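`bin(n).count("1")` is the simplest way to get the Hamming weight; Brian Kernighan's trick computes the same count by clearing one set bit per iteration, which is faster when few bits are set. A small sketch for comparison (not part of the original function):

def popcount_kernighan(n: int) -> int:
    """Count set bits by clearing the lowest set bit each iteration."""
    if n < 0:
        raise ValueError("Input value must be a positive integer")
    count = 0
    while n:
        n &= n - 1  # drops the lowest set bit
        count += 1
    return count

assert popcount_kernighan(25) == bin(25).count("1") == 3  # 25 == 0b11001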
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=13, _a=2, _a=24, _a=16, _a=True, _a=True, _a=32, _a=5, _a=4, _a=37, _a="gelu", _a=0.1, _a=0.1, _a=10, _a=0.02, _a=None, _a=2, _a=2, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = num_mel_bins
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = frequency_stride
__SCREAMING_SNAKE_CASE = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__SCREAMING_SNAKE_CASE = (self.max_length - self.patch_size) // self.time_stride + 1
__SCREAMING_SNAKE_CASE = frequency_out_dimension * time_out_dimension
__SCREAMING_SNAKE_CASE = num_patches + 2
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size], self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_values, labels
def __lowerCAmelCase ( self ) -> str:
return ASTConfig(
patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=_lowercase, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
def __lowerCAmelCase ( self, _a, _a, _a ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = ASTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
__SCREAMING_SNAKE_CASE = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            __SCREAMING_SNAKE_CASE
        ) , (
            __SCREAMING_SNAKE_CASE
        ) , (
            __SCREAMING_SNAKE_CASE
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ =(
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a ) -> Dict:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = ASTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=_lowercase, has_text_modality=_lowercase, hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase, nn.Linear ) )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(_lowercase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ['''input_values''']
self.assertListEqual(arg_names[:1], _lowercase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = ASTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _A ( ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = torchaudio.load(A__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Dict:
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.default_feature_extractor
__SCREAMING_SNAKE_CASE = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(_lowercase )
__SCREAMING_SNAKE_CASE = self.default_feature_extractor
__SCREAMING_SNAKE_CASE = prepare_audio()
__SCREAMING_SNAKE_CASE = audio.squeeze().numpy()
__SCREAMING_SNAKE_CASE = feature_extractor(_lowercase, sampling_rate=_lowercase, return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**_lowercase )
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape, _lowercase )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _lowercase, atol=1E-4 ) )
| 693 |
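The sequence-length arithmetic in the tester above is the usual ViT-style patch count, applied to a spectrogram instead of an image. A quick sketch with the tester's own default values (these are toy values; released AST checkpoints use a larger patch size):

patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride, time_stride = 2, 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1              # 12
num_patches = frequency_out * time_out                               # 96
seq_length = num_patches + 2  # +2 for the [CLS] and distillation tokens
print(seq_length)  # 98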
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Any = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 0 |
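The resize logic above snaps both output sides to a multiple of `ensure_multiple_of` and, when keeping the aspect ratio, applies whichever scale factor is closest to 1 to both dimensions. A standalone sketch of that computation, with an input size I picked for illustration:

import math

def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # round to the nearest multiple, falling back to floor/ceil at the bounds
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

# scaling a 480x640 image toward 384x384 with keep_aspect_ratio=True
in_h, in_w, out_h, out_w, multiple = 480, 640, 384, 384, 32
scale_h, scale_w = out_h / in_h, out_w / in_w
# "scale as little as possible": keep the factor closest to 1
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
new_h = constrain_to_multiple_of(scale * in_h, multiple)
new_w = constrain_to_multiple_of(scale * in_w, multiple)
print(new_h, new_w)  # 384 512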
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
a__ : Tuple = None
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
a__ : List[Any] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
a__ : Tuple = {
'camembert-base': 512,
}
a__ : Dict = '▁'
class __snake_case ( _UpperCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = CamembertTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase_ , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
snake_case__ = vocab_file
snake_case__ = False if not self.vocab_file else True
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Dict:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ = [self.cls_token_id]
snake_case__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> str:
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> int:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 368 |
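The special-token layout built by the methods above is the RoBERTa-style `<s> A </s></s> B </s>` pattern, with all-zero segment ids. Spelled out with plain strings (the example tokens are made up):

cls, sep = ["<s>"], ["</s>"]
tokens_a = ["▁j", "'", "aime", "▁le", "▁camembert"]
tokens_b = ["▁moi", "▁aussi"]

single = cls + tokens_a + sep
pair = cls + tokens_a + sep + sep + tokens_b + sep
print(pair)
# ['<s>', '▁j', "'", 'aime', '▁le', '▁camembert', '</s>', '</s>', '▁moi', '▁aussi', '</s>']
token_type_ids = [0] * len(pair)  # CamemBERT does not use segment ids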
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
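The class above implements Borůvka's algorithm: repeatedly pick each component's cheapest outgoing edge and merge components until one remains. A compact standalone sketch using union-find, assuming a connected graph:

def boruvka_mst(num_nodes, edges):
    """Total MST weight via Borůvka; edges are (u, v, weight) triples."""
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [None] * num_nodes  # cheapest outgoing edge per component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                ru, rv = find(u), find(v)
                if ru != rv:
                    parent[ru] = rv
                    mst_weight += w
                    components -= 1
    return mst_weight

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))  # 6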
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Any = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''van'''
def __init__( self : List[Any] , UpperCAmelCase__ : int=224 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Tuple=[7, 3, 3, 3] , UpperCAmelCase__ : str=[4, 2, 2, 2] , UpperCAmelCase__ : Tuple=[64, 128, 320, 512] , UpperCAmelCase__ : List[str]=[3, 3, 12, 3] , UpperCAmelCase__ : Union[str, Any]=[8, 8, 4, 4] , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[Any]=1e-6 , UpperCAmelCase__ : Optional[Any]=1e-2 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Tuple=0.0 , **UpperCAmelCase__ : Tuple , ) ->Optional[Any]:
super().__init__(**_lowercase )
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_sizes
UpperCAmelCase_ = strides
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = mlp_ratios
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = layer_scale_init_value
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = dropout_rate
| 390 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCamelCase_ ( ):
lowerCamelCase_ = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
lowerCamelCase_ = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(A__ )
# Let's go
lowerCamelCase_ = parser.parse_args()
if not hasattr(A__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
lowerCamelCase_ = args.func(A__ )
service.run()
if __name__ == "__main__":
    main()
 | 142 |
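The dispatch above relies on each subcommand registering a handler via `set_defaults(func=...)`; a parse with no subcommand leaves `func` unset and falls through to the help text. A self-contained sketch of the same argparse pattern (the `greet` command and its handler are invented for illustration; the real CLI's `func` returns a command object whose `run()` is then called):

from argparse import ArgumentParser, Namespace

def run_greet(args: Namespace) -> None:
    print(f"hello, {args.name}")

def main() -> None:
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    commands = parser.add_subparsers(help="demo-cli command helpers")

    greet = commands.add_parser("greet")
    greet.add_argument("--name", default="world")
    greet.set_defaults(func=run_greet)  # the dispatch target

    args = parser.parse_args()
    if not hasattr(args, "func"):  # no subcommand given
        parser.print_help()
        raise SystemExit(1)
    args.func(args)

if __name__ == "__main__":
    main()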
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
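A round-trip sketch of the Base16 logic above, with readable names and a cross-check against the standard library's `base64.b16encode`, which implements the same RFC 3548 uppercase alphabet:

import base64

def base16_encode(data: bytes) -> str:
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    if len(data) % 2 != 0:
        raise ValueError("Base16 encoded data must have an even number of hex digits")
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError("Base16 encoded data must be uppercase hex (RFC 3548)")
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))

encoded = base16_encode(b"Hello")
assert encoded == "48656C6C6F"
assert base16_decode(encoded) == b"Hello"
assert base64.b16encode(b"Hello").decode() == encoded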
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def UpperCamelCase ( _lowerCAmelCase : Any ) -> str:
_UpperCAmelCase : Dict = SwinConfig(image_size=192 )
if "base" in model_name:
_UpperCAmelCase : Optional[Any] = 6
_UpperCAmelCase : str = 128
_UpperCAmelCase : Optional[Any] = (2, 2, 18, 2)
_UpperCAmelCase : List[Any] = (4, 8, 16, 32)
elif "large" in model_name:
_UpperCAmelCase : Dict = 12
_UpperCAmelCase : Union[str, Any] = 192
_UpperCAmelCase : List[str] = (2, 2, 18, 2)
_UpperCAmelCase : str = (6, 12, 24, 48)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
_UpperCAmelCase : int = window_size
_UpperCAmelCase : int = embed_dim
_UpperCAmelCase : List[Any] = depths
_UpperCAmelCase : Dict = num_heads
return config
def UpperCamelCase ( _lowerCAmelCase : Dict ) -> Union[str, Any]:
if "encoder.mask_token" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("""encoder.mask_token""", """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
_UpperCAmelCase : str = name.replace("""encoder.patch_embed.proj""", """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""encoder.patch_embed.norm""", """embeddings.norm""" )
if "attn.proj" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("""attn.proj""", """attention.output.dense""" )
if "attn" in name:
_UpperCAmelCase : int = name.replace("""attn""", """attention.self""" )
if "norm1" in name:
_UpperCAmelCase : List[str] = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
_UpperCAmelCase : Tuple = name.replace("""norm2""", """layernorm_after""" )
if "mlp.fc1" in name:
_UpperCAmelCase : int = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
_UpperCAmelCase : Tuple = name.replace("""mlp.fc2""", """output.dense""" )
if name == "encoder.norm.weight":
_UpperCAmelCase : int = '''layernorm.weight'''
if name == "encoder.norm.bias":
_UpperCAmelCase : List[str] = '''layernorm.bias'''
if "decoder" in name:
pass
else:
_UpperCAmelCase : List[str] = '''swin.''' + name
return name
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
_UpperCAmelCase : Any = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
_UpperCAmelCase : Union[str, Any] = key.split(""".""" )
_UpperCAmelCase : Optional[int] = int(key_split[2] )
_UpperCAmelCase : Dict = int(key_split[4] )
_UpperCAmelCase : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase : int = val[:dim, :]
_UpperCAmelCase : Optional[Any] = val[
dim : dim * 2, :
]
_UpperCAmelCase : Dict = val[-dim:, :]
else:
_UpperCAmelCase : Optional[Any] = val[
:dim
]
_UpperCAmelCase : Optional[int] = val[
dim : dim * 2
]
_UpperCAmelCase : List[Any] = val[
-dim:
]
else:
_UpperCAmelCase : Tuple = val
return orig_state_dict
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : List[Any], _lowerCAmelCase : List[str] ) -> List[str]:
_UpperCAmelCase : Tuple = torch.load(A__, map_location="""cpu""" )['''model''']
_UpperCAmelCase : List[Any] = get_swin_config(A__ )
_UpperCAmelCase : Union[str, Any] = SwinForMaskedImageModeling(A__ )
model.eval()
_UpperCAmelCase : Union[str, Any] = convert_state_dict(A__, A__ )
model.load_state_dict(A__ )
_UpperCAmelCase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCAmelCase : Dict = ViTImageProcessor(size={"""height""": 192, """width""": 192} )
_UpperCAmelCase : Any = Image.open(requests.get(A__, stream=A__ ).raw )
_UpperCAmelCase : List[str] = image_processor(images=A__, return_tensors="""pt""" )
with torch.no_grad():
_UpperCAmelCase : Any = model(**A__ ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase__ : str = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 238 |
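The `qkv` branch of the state-dict conversion above slices a fused attention projection into separate query/key/value tensors along the first dimension. A minimal sketch of that split with a toy tensor:

import torch

# a fused qkv projection stacks q, k and v along dim 0:
# weight is (3 * hidden, hidden), bias is (3 * hidden,)
hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
qkv_bias = torch.arange(3 * hidden, dtype=torch.float32)

q_w = qkv_weight[:hidden, :]
k_w = qkv_weight[hidden : hidden * 2, :]
v_w = qkv_weight[-hidden:, :]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : hidden * 2], qkv_bias[-hidden:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)
assert torch.equal(torch.cat([q_w, k_w, v_w]), qkv_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), qkv_bias)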
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 0 |
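The tests above exercise the CLI end to end by spawning subprocesses and asserting on captured stdout. A minimal, self-contained version of that pattern using only the standard library (the accelerate suite uses its own `run_command`/`execute_subprocess_async` helpers instead):

import subprocess
import sys

# `python -c` keeps the sketch self-contained and portable
result = subprocess.run(
    [sys.executable, "-c", "print('hello from the cli')"],
    capture_output=True,
    text=True,
    check=True,  # raise CalledProcessError on a non-zero exit code
)
assert "hello from the cli" in result.stdout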
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 144 |
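The try/except import guard above swaps in dummy objects when the dependency check fails, so the package `import` always succeeds and the failure is deferred to first use. A sketch with invented names (`fancy_backend` and `FancyPipeline` are placeholders; the real check also gates on the installed transformers version):

import importlib.util

class OptionalDependencyNotAvailable(ImportError):
    pass

try:
    if importlib.util.find_spec("fancy_backend") is None:
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class FancyPipeline:  # dummy: importable, but fails loudly on first use
        def __init__(self, *args, **kwargs):
            raise ImportError("FancyPipeline requires the `fancy_backend` package")
else:
    from fancy_backend import FancyPipeline  # type: ignore[import-not-found]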
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Any = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowerCamelCase( _UpperCAmelCase ):
'''simple docstring'''
__magic_name__ = '''nllb-moe'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , snake_case_=12_8112 , snake_case_=1024 , snake_case_=12 , snake_case_=4096 , snake_case_=16 , snake_case_=12 , snake_case_=4096 , snake_case_=16 , snake_case_=0.05 , snake_case_=0.05 , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=1024 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_="float32" , snake_case_=False , snake_case_=128 , snake_case_=64 , snake_case_=4 , snake_case_=4 , snake_case_=0.001 , snake_case_=0.001 , snake_case_="all" , snake_case_=False , snake_case_=False , snake_case_=1.0 , snake_case_=0.2 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=False , **snake_case_ , ):
_A = vocab_size
_A = max_position_embeddings
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = router_z_loss_coef
_A = router_aux_loss_coef
_A = decoder_sparse_step
_A = encoder_sparse_step
_A = num_experts
_A = expert_capacity
_A = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
_A = router_dtype
_A = router_ignore_padding_tokens
_A = batch_prioritized_routing
_A = second_expert_policy
_A = normalize_router_prob_before_dropping
_A = moe_eval_capacity_token_fraction
_A = moe_token_dropout
_A = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 27 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__a : Dict = 'CompVis/stable-diffusion-v1-1'
__a : str = 'CompVis/stable-diffusion-v1-2'
__a : List[str] = 'CompVis/stable-diffusion-v1-3'
__a : Dict = 'CompVis/stable-diffusion-v1-4'
class __lowercase ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : AutoencoderKL , UpperCamelCase_ : CLIPTextModel , UpperCamelCase_ : CLIPTokenizer , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase_ : StableDiffusionSafetyChecker , UpperCamelCase_ : CLIPImageProcessor , UpperCamelCase_ : bool = True , ):
"""simple docstring"""
        super().__init__()
__A = StableDiffusionPipeline.from_pretrained(_lowercase )
__A = StableDiffusionPipeline.from_pretrained(_lowercase )
__A = StableDiffusionPipeline.from_pretrained(_lowercase )
__A = StableDiffusionPipeline(
vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , requires_safety_checker=_lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
return {k: getattr(self , _lowercase ) for k in self.config.keys() if not k.startswith("""_""" )}
def lowerCAmelCase_ ( self : Optional[Any] , UpperCamelCase_ : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 50 , UpperCamelCase_ : float = 7.5 , UpperCamelCase_ : Optional[Union[str, List[str]]] = None , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_ : int = 1 , **UpperCamelCase_ : Optional[int] , ):
"""simple docstring"""
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        """Generate images with the Stable Diffusion v1.2 checkpoint."""
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        """Generate images with the Stable Diffusion v1.3 checkpoint."""
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        """Generate images with the Stable Diffusion v1.4 checkpoint."""
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def _compare(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        """Run all four Stable Diffusion v1.x checkpoints on the same inputs and collect one result from each."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
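# A hedged usage sketch for the comparison pipeline above. The model id and the
# `custom_pipeline` argument are illustrative assumptions, not values taken
# from this file:
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe._compare(prompt="an astronaut riding a horse").images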
| 637 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel size, kernel number, step] of the convolution layer
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # restore the pooled gradient to feature-map size, then apply the
        # derivative of the sigmoid activation
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
    pass
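# A minimal smoke test of the class above on random data. The kernel size,
# pooling size and layer widths are illustrative assumptions, not values from
# the original file: with a 20x20 input, a 3x3 kernel, step 1 and 2x2 pooling,
# the flatten layer has 2 * 9 * 9 = 162 units.
if __name__ == "__main__":
    np.random.seed(0)
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=162, bp_num2=20, bp_num3=4)
    train_imgs = [np.random.rand(20, 20) for _ in range(2)]
    teach_signals = [np.random.rand(4) for _ in range(2)]
    cnn.train(2, train_imgs, teach_signals, n_repeat=1, error_accuracy=0.1, draw_e=False)
    print(cnn.predict(train_imgs))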
| 35 | 0 |
def ugly_numbers(n: int) -> int:
    """
    Returns the nth ugly number (a number whose only prime factors are 2, 3 and 5),
    using the classic three-pointer merge over the candidate multiples.

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    >>> ugly_numbers(150)
    5832
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 345 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings)

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 35 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 524 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 35 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 58 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law, V = I * R, on any two given electrical values and solve
    for the third. Exactly one of the three arguments must be passed as 0.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    >>> ohms_law(voltage=0, current=2, resistance=2)
    {'voltage': 4.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
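# A small worked example of the function above: with 10 V across a 5 ohm
# resistor (current passed as 0, marking it as the unknown), Ohm's law gives
# I = V / R = 2 A.
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=0, resistance=5))  # -> {'current': 2.0}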
| 35 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        # Writes the pyarrow table as JSON lines to a binary file handle,
        # optionally batching across a multiprocessing pool.
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
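# A minimal, hypothetical round-trip with the reader/writer above; the dataset
# content and the file name are illustrative assumptions:
#
#   ds = Dataset.from_dict({"col_1": [0, 1], "col_2": ["a", "b"]})
#   JsonDatasetWriter(ds, "data.jsonl").write()
#   reloaded = JsonDatasetReader("data.jsonl").read()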
| 693 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
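# A hedged usage sketch of the reader tokenizer above; the checkpoint name is
# one of the ids listed in the pretrained maps, and the inputs are illustrative:
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="Who founded Hugging Face?",
#       titles="Hugging Face",
#       texts="Hugging Face was founded in 2016 ...",
#       return_tensors="pt",
#   )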
| 35 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, using either multiprocessing.Pool or joblib."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Configures the parallel backend to use inside `datasets` for the duration of the context."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
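# A hedged usage sketch: by default `parallel_map` splits the iterable into
# `num_proc` contiguous slices and fans them out over a multiprocessing Pool;
# inside the context manager below it is routed through joblib instead. The
# backend name is an illustrative assumption:
#
#   with parallel_backend("spark"):
#       ...  # datasets operations that call parallel_map now use joblib-spark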
| 368 |
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Quickly rule out small primes and small factors before falling back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Sample random odd-sized integers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
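# A small self-check of the helpers above; these values come straight from the
# low-primes table, so the asserts do not depend on the random Rabin-Miller
# rounds.
if __name__ == "__main__":
    assert is_prime_low_num(997)
    assert not is_prime_low_num(1000)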
| 35 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 390 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 35 | 0 |
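For reference, the training loop in the logistic-regression script above is plain batch gradient descent on the binary cross-entropy cost; in standard notation (this summary is added for clarity and is not part of the original file):

\sigma(z) = \frac{1}{1 + e^{-z}}, \qquad h_\theta(x) = \sigma(\theta^\top x)

J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log h_\theta(x^{(i)}) + \left(1 - y^{(i)}\right) \log\left(1 - h_\theta(x^{(i)})\right) \right]

\nabla_\theta J(\theta) = \frac{1}{m} X^\top \left( \sigma(X\theta) - y \right), \qquad \theta \leftarrow \theta - \alpha \, \nabla_\theta J(\theta)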
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )


IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'


class ConvertCommand(BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__(self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ) -> None:
        '''simple docstring'''
        self._logger = logging.get_logger('''transformers-cli/converting''' )

        self._logger.info(F"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ''
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ''
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT , self._config , self._pytorch_dump_output , TF_DATASET_FILE )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' ) | 142 |
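# For context, a hedged sketch of how the command registered above is
# typically invoked from the shell (the flags mirror the parser arguments
# defined in register_subcommand; the paths are placeholders, not real files):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin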
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    '''simple docstring'''
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }

        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )

        return info

    @staticmethod
    def format_dict(d ):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 0 |
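A hedged programmatic equivalent of running `diffusers-cli env`, using the factory and class defined above (illustrative only):

# cmd = info_command_factory(None)  # the factory ignores its argument
# env_info = cmd.run()              # prints the environment report and returns the dict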
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    # Return a feature-dependent writer batch size, or None to keep the default.
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
def __init__( self , _A , _A = None , _A = None , _A = None , _A = False , _A = False , _A = None , **_A , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , )
_UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths}
_UpperCAmelCase : str = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
_UpperCAmelCase : Optional[int] = Parquet(
cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , hash=_lowercase , **_lowercase , )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
if self.streaming:
_UpperCAmelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : List[Any] = None
self.builder.download_and_prepare(
download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , )
_UpperCAmelCase : Any = self.builder.as_dataset(
split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter:
def __init__( self , _A , _A , _A = None , **_A , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = dataset
_UpperCAmelCase : List[str] = path_or_buf
_UpperCAmelCase : int = batch_size or get_writer_batch_size(dataset.features )
_UpperCAmelCase : str = parquet_writer_kwargs
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , """wb+""" ) as buffer:
_UpperCAmelCase : Any = self._write(file_obj=_lowercase , batch_size=_lowercase , **self.parquet_writer_kwargs )
else:
_UpperCAmelCase : Optional[int] = self._write(file_obj=self.path_or_buf , batch_size=_lowercase , **self.parquet_writer_kwargs )
return written
def __snake_case ( self , _A , _A , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Any = 0
_UpperCAmelCase : Any = parquet_writer_kwargs.pop("""path_or_buf""" , _lowercase )
_UpperCAmelCase : List[str] = self.dataset.features.arrow_schema
_UpperCAmelCase : Any = pq.ParquetWriter(_lowercase , schema=_lowercase , **_lowercase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _lowercase ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
_UpperCAmelCase : Optional[int] = query_table(
table=self.dataset._data , key=slice(_lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_lowercase )
written += batch.nbytes
writer.close()
return written
| 238 |
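A hedged usage sketch of the reader/writer pair above, as exposed through the public `datasets` API (the `to_parquet`/`from_parquet` methods exist upstream and delegate to classes like these; the file path is a placeholder):

from datasets import Dataset

ds = Dataset.from_dict({"col_1": [1, 2, 3], "col_2": ["a", "b", "c"]})
ds.to_parquet("data.parquet")                     # delegates to the dataset-to-parquet writer
roundtrip = Dataset.from_parquet("data.parquet")  # delegates to the parquet reader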
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    '''simple docstring'''
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('''Building PyTorch model from configuration: {}'''.format(str(config) ) )
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path) )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 0 |
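A minimal invocation of the metric, taken directly from the examples embedded in its docstring above:

import datasets

recall_metric = datasets.load_metric("recall")
results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
print(results)  # {'recall': 0.6666666666666666}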
import unittest
from knapsack import knapsack as k
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
    def test_base_case(self ):
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

    def test_easy_case(self ):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )

    def test_knapsack(self ):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
if __name__ == "__main__":
unittest.main()
| 27 |
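For reference, the tests above exercise the classic 0/1 knapsack recurrence over items i with weights w_i, values v_i, and remaining capacity w (standard notation, added for clarity):

K(i, w) = \begin{cases} 0 & \text{if } i = 0 \text{ or } w = 0 \\ K(i-1, w) & \text{if } w_i > w \\ \max\left( K(i-1, w),\; v_i + K(i-1, w - w_i) \right) & \text{otherwise} \end{cases}

For example, the last test (capacity 50, values 60/100/120 with weights 10/20/30) selects the second and third items: 100 + 120 = 220.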
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 637 |
import os
def solution(filename: str = "matrix.txt") -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__) , filename ) ) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]

    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )

    return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 0 |
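As a hand-checkable illustration of the recurrence dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]) used above (the 2x2 grid is made up for this example and is not from the problem's matrix.txt):

# grid:  1 3      dp:  1 4
#        2 1           3 4   -> minimal path sum = 4 (path 1 -> 2 -> 1)
tiny = [[1, 3], [2, 1]]
dp = [[0, 0], [0, 0]]
dp[0][0] = tiny[0][0]
dp[0][1] = tiny[0][1] + dp[0][0]
dp[1][0] = tiny[1][0] + dp[0][0]
dp[1][1] = tiny[1][1] + min(dp[0][1], dp[1][0])
assert dp[1][1] == 4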
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class snake_case_ ( _UpperCAmelCase ):
__lowerCamelCase : jnp.ndarray
__lowerCamelCase : jnp.ndarray
class snake_case_ ( nn.Module ):
__lowerCamelCase : int
__lowerCamelCase : Tuple[int] = (16, 32, 96, 256)
__lowerCamelCase : jnp.dtype = jnp.floataa
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ : str = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.block_out_channels[i]
SCREAMING_SNAKE_CASE_ : Any = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE_ : str = nn.Conv(
_lowercase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowercase )
SCREAMING_SNAKE_CASE_ : Tuple = nn.Conv(
_lowercase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowercase )
SCREAMING_SNAKE_CASE_ : List[Any] = blocks
SCREAMING_SNAKE_CASE_ : List[str] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.conv_in(_lowercase )
SCREAMING_SNAKE_CASE_ : List[Any] = nn.silu(_lowercase )
for block in self.blocks:
SCREAMING_SNAKE_CASE_ : Dict = block(_lowercase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.silu(_lowercase )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.conv_out(_lowercase )
return embedding
@flax_register_to_config
class snake_case_ ( nn.Module , _UpperCAmelCase , _UpperCAmelCase ):
__lowerCamelCase : int = 32
__lowerCamelCase : int = 4
__lowerCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__lowerCamelCase : Union[bool, Tuple[bool]] = False
__lowerCamelCase : Tuple[int] = (320, 640, 1280, 1280)
__lowerCamelCase : int = 2
__lowerCamelCase : Union[int, Tuple[int]] = 8
__lowerCamelCase : Optional[Union[int, Tuple[int]]] = None
__lowerCamelCase : int = 1280
__lowerCamelCase : float = 0.0
__lowerCamelCase : bool = False
__lowerCamelCase : jnp.dtype = jnp.floataa
__lowerCamelCase : bool = True
__lowerCamelCase : int = 0
__lowerCamelCase : str = "rgb"
__lowerCamelCase : Tuple[int] = (16, 32, 96, 256)
def __A ( self , __lowerCAmelCase ):
# init input tensors
SCREAMING_SNAKE_CASE_ : Any = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE_ : Tuple = jnp.zeros(_lowercase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.zeros(_lowercase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : List[Any] = jax.random.split(_lowercase )
SCREAMING_SNAKE_CASE_ : Any = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )["params"]
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.block_out_channels
SCREAMING_SNAKE_CASE_ : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE_ : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxTimestepEmbedding(_lowercase , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE_ : Any = self.only_cross_attention
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE_ : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE_ : List[str] = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Optional[int] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = block_out_channels[0]
SCREAMING_SNAKE_CASE_ : List[str] = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowercase )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE_ : Tuple = output_channel
SCREAMING_SNAKE_CASE_ : Any = block_out_channels[i]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = i == len(_lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE_ : int = FlaxCrossAttnDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE_ : Dict = FlaxDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowercase )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE_ : int = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowercase )
if not is_final_block:
SCREAMING_SNAKE_CASE_ : str = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowercase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = down_blocks
SCREAMING_SNAKE_CASE_ : List[str] = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE_ : List[Any] = block_out_channels[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=_lowercase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = False , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.flip(_lowercase , axis=1 )
# 1. time
if not isinstance(_lowercase , jnp.ndarray ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ : int = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.expand_dims(_lowercase , 0 )
SCREAMING_SNAKE_CASE_ : Dict = self.time_proj(_lowercase )
SCREAMING_SNAKE_CASE_ : List[Any] = self.time_embedding(_lowercase )
# 2. pre-process
SCREAMING_SNAKE_CASE_ : Any = jnp.transpose(_lowercase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ : str = self.conv_in(_lowercase )
SCREAMING_SNAKE_CASE_ : Any = jnp.transpose(_lowercase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ : Dict = self.controlnet_cond_embedding(_lowercase )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE_ : Dict = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = down_block(_lowercase , _lowercase , _lowercase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE_ : Dict = down_block(_lowercase , _lowercase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE_ : List[str] = self.mid_block(_lowercase , _lowercase , _lowercase , deterministic=not train )
# 5. contronet blocks
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(_lowercase , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = controlnet_block(_lowercase )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE_ : List[str] = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.controlnet_mid_block(_lowercase )
# 6. scaling
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_lowercase , mid_block_res_sample=_lowercase )
| 345 |
from math import factorial
def solution(n: int = 20) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 0 |
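The function above computes the central binomial coefficient, which also counts the lattice paths from the top-left to the bottom-right corner of an n x n grid (Project Euler problem 15):

\binom{2n}{n} = \frac{(2n)!}{n!\,n!}, \qquad \binom{40}{20} = 137846528820 \quad (n = 20)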
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase ( _UpperCAmelCase ):
__UpperCamelCase = '''marian'''
__UpperCamelCase = ['''past_key_values''']
__UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] ,_lowerCAmelCase : str=58_101 ,_lowerCAmelCase : Union[str, Any]=None ,_lowerCAmelCase : Tuple=1_024 ,_lowerCAmelCase : List[Any]=12 ,_lowerCAmelCase : int=4_096 ,_lowerCAmelCase : int=16 ,_lowerCAmelCase : str=12 ,_lowerCAmelCase : List[str]=4_096 ,_lowerCAmelCase : Tuple=16 ,_lowerCAmelCase : List[Any]=0.0 ,_lowerCAmelCase : Any=0.0 ,_lowerCAmelCase : List[Any]=True ,_lowerCAmelCase : Dict=True ,_lowerCAmelCase : Union[str, Any]="gelu" ,_lowerCAmelCase : int=1_024 ,_lowerCAmelCase : Optional[Any]=0.1 ,_lowerCAmelCase : List[Any]=0.0 ,_lowerCAmelCase : Optional[int]=0.0 ,_lowerCAmelCase : str=0.0_2 ,_lowerCAmelCase : Tuple=58_100 ,_lowerCAmelCase : int=False ,_lowerCAmelCase : Any=58_100 ,_lowerCAmelCase : Tuple=0 ,_lowerCAmelCase : Tuple=0 ,_lowerCAmelCase : List[Any]=True ,**_lowerCAmelCase : int ,):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = decoder_vocab_size or vocab_size
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = use_cache
__snake_case = encoder_layers
__snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowercase ,eos_token_id=_lowercase ,is_encoder_decoder=_lowercase ,decoder_start_token_id=_lowercase ,forced_eos_token_id=_lowercase ,**_lowercase ,)
class UpperCamelCase ( _UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case = {0: '''batch'''}
__snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase ,direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case = self.num_layers
for i in range(_lowercase ):
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super().outputs
else:
__snake_case = super(_lowercase ,self ).outputs
if self.use_past:
__snake_case = self.num_layers
for i in range(_lowercase ):
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
# Generate decoder inputs
__snake_case = seq_length if not self.use_past else 1
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
__snake_case = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__snake_case = dict(**_lowercase ,**_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case = common_inputs['''input_ids'''].shape
__snake_case = common_inputs['''decoder_input_ids'''].shape[1]
__snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = decoder_seq_length + 3
__snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__snake_case = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowercase ,_lowercase )] ,dim=1 )
__snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__snake_case = self.num_layers
__snake_case = min(_lowercase ,_lowercase )
__snake_case = max(_lowercase ,_lowercase ) - min_num_layers
__snake_case = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
__snake_case = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase ,_lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def UpperCamelCase_ ( self : int ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = self.num_layers
__snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = common_inputs['''attention_mask'''].dtype
__snake_case = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowercase ,_lowercase ,dtype=_lowercase )] ,dim=1 )
__snake_case = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
__snake_case = compute_effective_axis_dimension(
_lowercase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = tokenizer.num_special_tokens_to_add(_lowercase )
__snake_case = compute_effective_axis_dimension(
_lowercase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__snake_case = dict(tokenizer(_lowercase ,return_tensors=_lowercase ) )
return common_inputs
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase ,batch_size=_lowercase ,seq_length=_lowercase ,is_pair=_lowercase ,framework=_lowercase )
else:
__snake_case = self._generate_dummy_inputs_for_causal_lm(
_lowercase ,batch_size=_lowercase ,seq_length=_lowercase ,is_pair=_lowercase ,framework=_lowercase )
return common_inputs
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : Union[str, Any] ,_lowerCAmelCase : Any ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super()._flatten_past_key_values_(_lowercase ,_lowercase ,_lowercase ,_lowercase )
else:
__snake_case = super(_lowercase ,self )._flatten_past_key_values_(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
@property
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
return 1E-4
| 524 |
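A hedged export sketch for the ONNX config above (upstream names this class MarianOnnxConfig, which appears obfuscated as UpperCamelCase in this dump; the transformers.onnx.export signature shown here matches the era this code comes from but may differ in newer releases):

from pathlib import Path

from transformers import AutoTokenizer, MarianMTModel
from transformers.onnx import export

model_id = "Helsinki-NLP/opus-mt-en-de"  # the checkpoint referenced in the config map above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = MarianMTModel.from_pretrained(model_id)

onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")  # hypothetical direct construction
export(tokenizer, model, onnx_config, opset=13, output=Path("marian.onnx"))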
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass


def hashimage(image) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
        self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
| 35 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
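    # Hedged usage sketch. The names stress, tangential_force and area are
    # taken from the function body above; exactly one argument must be 0 and
    # the function returns the name and value of the missing quantity.
    # `shear_stress` is an assumed name for the obfuscated function above:
    #   shear_stress(stress=0, tangential_force=100, area=5)  # ('stress', 20.0)
    #   shear_stress(stress=25, tangential_force=0, area=4)   # ('tangential_force', 100)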
| 58 |
def a ( A__ ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(A__ , int ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
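    # Hedged example: bin(25) == '0b11001' contains three '1' characters, so
    # the function above returns 3 for input 25. Standalone cross-check:
    assert bin(25).count("1") == 3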
| 35 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
_snake_case : Tuple = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_snake_case : Optional[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_snake_case : int = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
], )
def __lowerCAmelCase ( self ) -> List[str]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def __lowerCAmelCase ( self, _a, _a, _a=None, _a="uniform_average", _a=True ) -> Tuple:
__SCREAMING_SNAKE_CASE = mean_squared_error(
_lowercase, _lowercase, sample_weight=_lowercase, multioutput=_lowercase, squared=_lowercase )
return {"mse": mse}
| 693 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Any = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
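# Worked example of the resize rule above (illustrative numbers): for a
# 480x640 input, output_size 384x384, keep_aspect_ratio=True and multiple=32,
# scale_height = 384/480 = 0.8 and scale_width = 384/640 = 0.6. Since 0.8 is
# closer to 1, the "fit height" branch applies both sides by 0.8, and rounding
# to the nearest multiple of 32 gives constraint_to_multiple_of(384) = 384 and
# constraint_to_multiple_of(512) = 512, so the function returns (384, 512).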
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : List[str] = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __snake_case ( _UpperCAmelCase ):
__lowerCAmelCase = '''owlvit_text_model'''
def __init__( self , UpperCamelCase_=4_9408 , UpperCamelCase_=512 , UpperCamelCase_=2048 , UpperCamelCase_=12 , UpperCamelCase_=8 , UpperCamelCase_=16 , UpperCamelCase_="quick_gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1.0 , UpperCamelCase_=0 , UpperCamelCase_=4_9406 , UpperCamelCase_=4_9407 , **UpperCamelCase_ , ) -> Any:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = intermediate_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = max_position_embeddings
snake_case__ = hidden_act
snake_case__ = layer_norm_eps
snake_case__ = attention_dropout
snake_case__ = initializer_range
snake_case__ = initializer_factor
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> Any:
cls._set_token_in_kwargs(_lowercase )
snake_case__ = cls.get_config_dict(_lowercase , **_lowercase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
snake_case__ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class __snake_case ( _UpperCAmelCase ):
__lowerCAmelCase = '''owlvit_vision_model'''
def __init__( self , UpperCamelCase_=768 , UpperCamelCase_=3072 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3 , UpperCamelCase_=768 , UpperCamelCase_=32 , UpperCamelCase_="quick_gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1.0 , **UpperCamelCase_ , ) -> Any:
super().__init__(**_lowercase )
snake_case__ = hidden_size
snake_case__ = intermediate_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = hidden_act
snake_case__ = layer_norm_eps
snake_case__ = attention_dropout
snake_case__ = initializer_range
snake_case__ = initializer_factor
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]:
cls._set_token_in_kwargs(_lowercase )
snake_case__ = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
snake_case__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class __snake_case ( _UpperCAmelCase ):
__lowerCAmelCase = '''owlvit'''
__lowerCAmelCase = True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=512 , UpperCamelCase_=2.6_5_9_2 , UpperCamelCase_=True , **UpperCamelCase_ , ) -> str:
super().__init__(**_lowercase )
if text_config is None:
snake_case__ = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
snake_case__ = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
snake_case__ = OwlViTTextConfig(**_lowercase )
snake_case__ = OwlViTVisionConfig(**_lowercase )
snake_case__ = projection_dim
snake_case__ = logit_scale_init_value
snake_case__ = return_dict
snake_case__ = 1.0
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]:
cls._set_token_in_kwargs(_lowercase )
snake_case__ = cls.get_config_dict(_lowercase , **_lowercase )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
@classmethod
def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Dict:
snake_case__ = {}
snake_case__ = text_config
snake_case__ = vision_config
return cls.from_dict(_lowercase , **_lowercase )
def _snake_case ( self ) -> List[Any]:
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.text_config.to_dict()
snake_case__ = self.vision_config.to_dict()
snake_case__ = self.__class__.model_type
return output
class __snake_case ( _UpperCAmelCase ):
@property
def _snake_case ( self ) -> Any:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def _snake_case ( self ) -> List[str]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def _snake_case ( self ) -> List[str]:
return 1E-4
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = None , ) -> Tuple:
snake_case__ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_lowercase , seq_length=_lowercase , framework=_lowercase )
snake_case__ = super().generate_dummy_inputs(
processor.image_processor , batch_size=_lowercase , framework=_lowercase )
return {**text_input_dict, **image_input_dict}
@property
def _snake_case ( self ) -> List[Any]:
return 14
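# Hedged usage sketch of the composite config above (names below follow the
# public transformers API for OwlViT, not the obfuscated identifiers):
#   from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
#   text_cfg, vision_cfg = OwlViTTextConfig(), OwlViTVisionConfig()
#   cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
#   cfg.to_dict()  # nested dict with "text_config" and "vision_config" entries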
| 368 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
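    # Hedged sketch of the intended test (the class above implements Boruvka's
    # minimum-spanning-tree algorithm; `Graph`, `add_edge` and `boruvka` are
    # assumed de-scrambled names for the obfuscated identifiers):
    #   g = Graph(4)
    #   g.add_edge(0, 1, 10); g.add_edge(0, 2, 6)
    #   g.add_edge(0, 3, 5);  g.add_edge(1, 3, 15); g.add_edge(2, 3, 4)
    #   g.boruvka()  # prints each added edge, then a total MST weight of 19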
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ : Union[str, Any] = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
lowercase__ : List[str] = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
lowercase__ : Tuple = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = RealmTokenizer
def __init__( self : Any , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Any="[UNK]" , UpperCAmelCase__ : int="[SEP]" , UpperCAmelCase__ : List[Any]="[PAD]" , UpperCAmelCase__ : Union[str, Any]="[CLS]" , UpperCAmelCase__ : str="[MASK]" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[str] , ) ->int:
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(_lowercase , normalizer_state.pop('''type''' ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**_lowercase )
UpperCAmelCase_ = do_lower_case
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Optional[Any] ) ->Optional[int]:
UpperCAmelCase_ = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ = text
UpperCAmelCase_ = kwargs.pop('''text_pair''' , _lowercase )
UpperCAmelCase_ = kwargs.pop('''return_tensors''' , _lowercase )
UpperCAmelCase_ = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(_lowercase ):
if batch_text_pair is not None:
UpperCAmelCase_ = batch_text_pair[idx]
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = super().__call__(_lowercase , _lowercase , return_tensors=_lowercase , **_lowercase )
UpperCAmelCase_ = encoded_candidates.get('''input_ids''' )
UpperCAmelCase_ = encoded_candidates.get('''attention_mask''' )
UpperCAmelCase_ = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_lowercase )
UpperCAmelCase_ = {key: item for key, item in output_data.items() if len(_lowercase ) != 0}
return BatchEncoding(_lowercase , tensor_type=_lowercase )
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str]=None ) ->List[str]:
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) ->int:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) ->Dict:
UpperCAmelCase_ = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
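# Hedged usage sketch (public transformers name RealmTokenizerFast; the
# candidate-batching method above is batch_encode_candidates in that API, and
# it pads every candidate to max_length so the outputs stack to 3D tensors):
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt")
#   batch["input_ids"].shape  # (batch_size, num_candidates, max_length)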
| 390 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
"""simple docstring"""
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__lowercase : Union[str, Any] = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
__lowercase : int = 'hopper-medium-v2'
__lowercase : Any = gym.make(env_name)
__lowercase : List[str] = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
__lowercase : Optional[Any] = env.reset()
__lowercase : Optional[int] = 0
__lowercase : List[str] = 0
__lowercase : List[Any] = 1_0_0_0
__lowercase : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__lowercase : int = pipeline(obs, planning_horizon=3_2)
# execute action in environment
            __lowercase , __lowercase , __lowercase , __lowercase = env.step(denorm_actions)
__lowercase : Tuple = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__lowercase : List[Any] = next_observation
except KeyboardInterrupt:
pass
    print(f'''Total reward: {total_reward}''')
| 142 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
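    # Hedged round-trip check against the standard library: base64.b16encode
    # implements the same uppercase-hex Base16 encoding from RFC 3548.
    import base64
    assert base64.b16encode(b"Hello World!") == b"48656C6C6F20576F726C6421"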
| 35 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class _UpperCAmelCase ( _UpperCAmelCase):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__a : str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__a : ClassVar[Features] = Features({"""text""": Value("""string""")})
__a : ClassVar[Features] = Features({"""labels""": ClassLabel})
__a : str = "text"
__a : str = "labels"
def __snake_case ( self , _A ) -> Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , _lowercase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
_UpperCAmelCase : int = copy.deepcopy(self )
_UpperCAmelCase : List[str] = self.label_schema.copy()
_UpperCAmelCase : List[Any] = features[self.label_column]
_UpperCAmelCase : str = label_schema
return task_template
@property
def __snake_case ( self ) -> str:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
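# Hedged illustration of the alignment step above (in the public datasets API
# the method is TaskTemplate.align_with_features): given
#   features = Features({"text": Value("string"),
#                        "labels": ClassLabel(names=["neg", "pos"])})
# the returned template's label_schema replaces the bare ClassLabel
# placeholder declared on the class with that concrete two-class ClassLabel.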
| 238 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ = LxmertConfig.from_json_file(A__ )
print(f'Building PyTorch model from configuration: {config}' )
UpperCAmelCase_ = LxmertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A__ , A__ , A__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
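# Hedged CLI sketch (the script filename and all paths are placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin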
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
from functools import reduce
__A : Optional[Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(A__ ) * int(A__ ) ) , n[i : i + 13] ) )
for i in range(len(A__ ) - 12 ) )
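# The reduce above multiplies the 13 digits of each window n[i : i + 13]; an
# equivalent, more direct sketch using only the standard library (Python 3.8+,
# with N the de-scrambled name of the digit-string constant above):
#   from math import prod
#   max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))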
if __name__ == "__main__":
print(f"{solution() = }")
| 27 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__a : Tuple = logging.get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : str = None , UpperCamelCase_ : uuid.UUID = None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None ):
"""simple docstring"""
if not conversation_id:
            __A = uuid.uuid4()
if past_user_inputs is None:
__A = []
if generated_responses is None:
__A = []
__A = conversation_id
__A = past_user_inputs
__A = generated_responses
__A = text
def __eq__( self : Optional[int] , UpperCamelCase_ : Dict ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase_ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : bool = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__A = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__A = text
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__A = None
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : str ):
"""simple docstring"""
self.generated_responses.append(_lowercase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : str ):
"""simple docstring"""
__A = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__A = '''user''' if is_user else '''bot'''
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
_UpperCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __lowercase ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[Any] ):
"""simple docstring"""
super().__init__(*_lowercase , **_lowercase )
if self.tokenizer.pad_token_id is None:
__A = self.tokenizer.eos_token
def lowerCAmelCase_ ( self : str , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : List[Any] ):
"""simple docstring"""
__A = {}
__A = {}
__A = {}
if min_length_for_response is not None:
__A = min_length_for_response
if minimum_tokens is not None:
__A = minimum_tokens
if "max_length" in generate_kwargs:
__A = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__A = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , UpperCamelCase_ : Union[Conversation, List[Conversation]] , UpperCamelCase_ : Optional[Any]=0 , **UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
__A = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : Conversation , UpperCamelCase_ : str=32 ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation\'s `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__A = self.tokenizer._build_conversation_input_ids(_lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__A = self._legacy_parse_and_tokenize(_lowercase )
if self.framework == "pt":
__A = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__A = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple=10 , **UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
__A = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__A = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__A = max_length - minimum_tokens
__A = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__A = model_inputs['''attention_mask'''][:, -trim:]
__A = model_inputs.pop("""conversation""" )
__A = max_length
__A = self.model.generate(**_lowercase , **_lowercase )
if self.model.config.is_encoder_decoder:
__A = 1
else:
__A = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase_ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int=True ):
"""simple docstring"""
__A = model_outputs['''output_ids''']
__A = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
__A = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_lowercase )
return conversation
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase_ : Conversation ):
"""simple docstring"""
__A = self.tokenizer.eos_token_id
__A = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
if len(_lowercase ) > self.tokenizer.model_max_length:
__A = input_ids[-self.tokenizer.model_max_length :]
return input_ids
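# Hedged usage sketch (public transformers names; the class above is the
# obfuscated ConversationalPipeline):
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   conversation.generated_responses[-1]  # the model's latest reply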
| 637 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
    def __init__( self , conva_get , size_pa , bp_numa , bp_numa2 , bp_numa3 , rate_w=0.2 , rate_t=0.2 ):
        self.num_bp1 = bp_numa
        self.num_bp2 = bp_numa2
        self.num_bp3 = bp_numa3
        self.conv1 = conva_get[:2]
        self.step_conv1 = conva_get[2]
        self.size_pooling1 = size_pa
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0] , self.conv1[0] ) + 0.5 )
            for i in range(self.conv1[1] )
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3 , self.num_bp2 ) + 0.5 )
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2 , self.num_bp1 ) + 0.5 )
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1] ) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2 ) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3 ) + 1
    def save_model( self , save_path ):
        # save model dict with pickle
        model_dic = {
            '''num_bp1''': self.num_bp1,
            '''num_bp2''': self.num_bp2,
            '''num_bp3''': self.num_bp3,
            '''conv1''': self.conv1,
            '''step_conv1''': self.step_conv1,
            '''size_pooling1''': self.size_pooling1,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conv1,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conv1,
            '''thre_bp2''': self.thre_bp2,
            '''thre_bp3''': self.thre_bp3,
        }
        with open(save_path , '''wb''' ) as f:
            pickle.dump(model_dic , f )
        print(f"""Model saved: {save_path}""" )
@classmethod
    def read_model( cls , model_path ):
        # read saved model
        with open(model_path , '''rb''' ) as f:
            model_dic = pickle.load(f )  # noqa: S301
        conv_get = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        size_p1 = model_dic.get('''size_pooling1''' )
        bp_num1 = model_dic.get('''num_bp1''' )
        bp_num2 = model_dic.get('''num_bp2''' )
        bp_num3 = model_dic.get('''num_bp3''' )
        rate_weight = model_dic.get('''rate_weight''' )
        rate_thre = model_dic.get('''rate_thre''' )
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_weight , rate_thre )
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get('''w_conv1''' )
        conv_ins.wkj = model_dic.get('''wkj''' )
        conv_ins.vji = model_dic.get('''vji''' )
        conv_ins.thre_conv1 = model_dic.get('''thre_conv1''' )
        conv_ins.thre_bp2 = model_dic.get('''thre_bp2''' )
        conv_ins.thre_bp3 = model_dic.get('''thre_bp3''' )
        return conv_ins
    def sig( self , x ):
        return 1 / (1 + np.exp(-1 * x ))
    def do_round( self , x ):
        return round(x , 3 )
    def convolute( self , data , convs , w_convs , thre_convs , conv_step ):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data )[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus )
        # calculate the feature map of every single kernel, and save as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(num_conv ):
            featuremap = []
            for i_focus in range(len(data_focus ) ):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus ) )
            featuremap = np.asmatrix(featuremap ).reshape(
                size_feature_map , size_feature_map )
            data_featuremap.append(featuremap )
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus ) )
        focus_list = np.asarray(focus1_list )
        return focus_list, data_featuremap
    def pooling( self , featuremaps , size_pooling , pooling_type="average_pool" ):
        # pooling process
        size_map = len(featuremaps[0] )
        size_pooled = int(size_map / size_pooling )
        featuremap_pooled = []
        for i_map in range(len(featuremaps ) ):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0 , size_map , size_pooling ):
                for j_focus in range(0 , size_map , size_pooling ):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus ) )
            map_pooled = np.asmatrix(map_pooled ).reshape(size_pooled , size_pooled )
            featuremap_pooled.append(map_pooled )
        return featuremap_pooled
    def _expand( self , data ):
        # expanding three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def _expand_mat( self , data_mat ):
        # expanding a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool( self , out_map , pd_pool , num_map , size_map , size_pooling ):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conv1 = np.ones((size_map, size_map) )
            for i in range(0 , size_map , size_pooling ):
                for j in range(0 , size_map , size_pooling ):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1 , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(pd_conv2 )
        return pd_all
    def train( self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool ):
        # model training
        print('''----------------------Start Training-------------------------''' )
        print((''' - - Shape: Train_Data ''', np.shape(datas_train)) )
        print((''' - - Shape: Teach_Data ''', np.shape(datas_teach)) )
        rp = 0
        all_mse = []
        mse = 1_00_00
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"""-------------Learning Time {rp}--------------""" )
            for p in range(len(datas_train ) ):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p] )
                data_teach = np.asarray(datas_teach[p] )
                data_focus1, data_conved1 = self.convolute(
                    data_train , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
                data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
                shape_featuremap1 = np.shape(data_conved1 )
                data_bp_input = self._expand(data_pooled1 )
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1 , self.vji.T ) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j )
                bp_net_k = np.dot(bp_out2 , self.wkj.T ) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k )
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3) , np.multiply(bp_out3 , (1 - bp_out3) ) )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all , self.wkj ) , np.multiply(bp_out2 , (1 - bp_out2) ) )
                pd_i_all = np.dot(pd_j_all , self.vji )
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1 , pd_conv1_pooled , shape_featuremap1[0] , shape_featuremap1[1] , self.size_pooling1 , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1] ):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv] )
                    delta_w = self.rate_weight * np.dot(pd_conv_list , data_focus1 )
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]) )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3 ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse , '''+-''' )
            plt.plot(yplot , '''r--''' )
            plt.xlabel('''Learning Times''' )
            plt.ylabel('''All_mse''' )
            plt.grid(True , alpha=0.5 )
            plt.show()
        print('''------------------Training Complete---------------------''' )
        print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse
    def predict( self , datas_test ):
        # model predict
        produce_out = []
        print('''-------------------Start Testing-------------------------''' )
        print((''' - - Shape: Test_Data ''', np.shape(datas_test)) )
        for p in range(len(datas_test ) ):
            data_test = np.asmatrix(datas_test[p] )
            data_focus1, data_conved1 = self.convolute(
                data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
            data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
            data_bp_input = self._expand(data_pooled1 )
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j )
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k )
            produce_out.extend(bp_out3.getA().tolist() )
        res = [list(map(self.do_round , each ) ) for each in produce_out]
        return np.asarray(res )
    def convolution( self , data ):
        # return the data of image after the convolution process so we can check it out
        data_test = np.asmatrix(data )
        data_focus1, data_conved1 = self.convolute(
            data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
        data_pooled1 = self.pooling(data_conved1 , self.size_pooling1 )
        return data_conved1, data_pooled1
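# --- Editor's illustrative sketch (assumption: not part of the original class) ---
# The pooling step above averages non-overlapping windows. The same idea in plain
# NumPy, assuming the map size is divisible by the window size:
def _demo_average_pool(feature_map, size_pooling):
    size_map = feature_map.shape[0]
    windows = feature_map.reshape(
        size_map // size_pooling, size_pooling, size_map // size_pooling, size_pooling
    )
    return windows.mean(axis=(1, 3))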
if __name__ == "__main__":
pass
| 35 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
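# --- Editor's illustrative sketch (assumption: paths and components are placeholders) ---
# The save/load round-trip these tests exercise, in miniature:
#
#   processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   processor.save_pretrained("/tmp/chinese_clip_processor")
#   restored = ChineseCLIPProcessor.from_pretrained("/tmp/chinese_clip_processor")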
| 345 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
    def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ):
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp( self ):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
    def test_inputs_embeds( self ):
        pass
    def test_save_load_fast_init_from_base( self ):
        pass
    def test_save_load_fast_init_to_base( self ):
        pass
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
    def test_retain_grad_hidden_states_attentions( self ):
        return
    @unittest.skip('''The model doesn\'t support left padding''' )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        pass
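# Editor's note (illustrative): the cache test above checks that decoding one new
# token together with `past_key_values` reproduces the last-position hidden state
# of a full forward pass over the concatenated sequence, up to a 1e-3 tolerance.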
| 35 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput ( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_attention_heads : int = 32 , attention_head_dim : int = 64 , num_layers : int = 20 , embedding_dim : int = 768 , num_embeddings : int = 77 , additional_embeddings : int = 4 , dropout : float = 0.0 , time_embed_act_fn : str = "silu" , norm_in_type : Optional[str] = None , embedding_proj_norm_type : Optional[str] = None , encoder_hid_proj_type : Optional[str] = "linear" , added_emb_type : Optional[str] = "prd" , time_embed_dim : Optional[int] = None , embedding_proj_dim : Optional[int] = None , clip_embed_dim : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
__snake_case = num_attention_heads
__snake_case = attention_head_dim
__snake_case = num_attention_heads * attention_head_dim
__snake_case = additional_embeddings
__snake_case = time_embed_dim or inner_dim
__snake_case = embedding_proj_dim or embedding_dim
__snake_case = clip_embed_dim or embedding_dim
__snake_case = Timesteps(_lowercase ,_lowercase ,0 )
__snake_case = TimestepEmbedding(_lowercase ,_lowercase ,out_dim=_lowercase ,act_fn=_lowercase )
__snake_case = nn.Linear(_lowercase ,_lowercase )
if embedding_proj_norm_type is None:
__snake_case = None
elif embedding_proj_norm_type == "layer":
__snake_case = nn.LayerNorm(_lowercase )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__snake_case = nn.Linear(_lowercase ,_lowercase )
if encoder_hid_proj_type is None:
__snake_case = None
elif encoder_hid_proj_type == "linear":
__snake_case = nn.Linear(_lowercase ,_lowercase )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__snake_case = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_lowercase ) )
if added_emb_type == "prd":
__snake_case = nn.Parameter(torch.zeros(1 ,1 ,_lowercase ) )
elif added_emb_type is None:
__snake_case = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__snake_case = nn.ModuleList(
[
BasicTransformerBlock(
_lowercase ,_lowercase ,_lowercase ,dropout=_lowercase ,activation_fn="gelu" ,attention_bias=_lowercase ,)
for d in range(_lowercase )
] )
if norm_in_type == "layer":
__snake_case = nn.LayerNorm(_lowercase )
elif norm_in_type is None:
__snake_case = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
__snake_case = nn.LayerNorm(_lowercase )
__snake_case = nn.Linear(_lowercase ,_lowercase )
__snake_case = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
__snake_case = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" ,_lowercase ,persistent=_lowercase )
__snake_case = nn.Parameter(torch.zeros(1 ,_lowercase ) )
__snake_case = nn.Parameter(torch.zeros(1 ,_lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ):
        """simple docstring"""
        processors = {}
        def fn_recursive_add_processors(name : str , module : torch.nn.Module , processors : Dict[str, AttentionProcessor] ):
            if hasattr(module , "set_processor" ):
                processors[F"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        """simple docstring"""
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(name : str , module : torch.nn.Module , processor ):
            if hasattr(module , "set_processor" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        """simple docstring"""
        self.set_attn_processor(AttnProcessor() )
    def forward( self , hidden_states , timestep : Union[torch.Tensor, float, int] , proj_embedding : torch.FloatTensor , encoder_hidden_states : Optional[torch.FloatTensor] = None , attention_mask : Optional[torch.BoolTensor] = None , return_dict : bool = True , ):
        """simple docstring"""
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents( self , prior_latents ):
        """simple docstring"""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
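# --- Editor's illustrative sketch (assumption: not part of the diffusers module) ---
# The additive causal mask built in __init__ above, in isolation: positions
# strictly above the diagonal keep -10000.0 (so softmax attention cannot look
# ahead), everything on/below the diagonal is zeroed by triu_(1).
def _demo_causal_mask(n: int) -> torch.Tensor:
    mask = torch.full([n, n], -10_000.0)
    mask.triu_(1)
    return mask[None, ...]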
| 524 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_special_tokens_as_you_like( self ):
        pass
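# Editor's note (illustrative): with the toy vocab written by setUp, WordPiece
# greedily matches the longest known prefix, so "unwanted" splits into "un",
# "##want", "##ed", which map to ids [7, 4, 5] in that vocabulary.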
| 35 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """simple docstring"""
    def __init__( self , initial_capacity : int = 6 ) -> None:
        '''simple docstring'''
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity : int ) -> None:
        '''simple docstring'''
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        '''simple docstring'''
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ) -> Any | None:
        '''simple docstring'''
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data : Any ) -> None:
        '''simple docstring'''
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) -> Any:
        '''simple docstring'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        '''simple docstring'''
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
        '''simple docstring'''
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    """simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
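# --- Editor's illustrative usage (assumption: not part of the original module) ---
def _demo_circular_queue() -> None:
    queue = CircularQueueLinkedList(2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.dequeue() == "a"  # FIFO order
    assert queue.dequeue() == "b"
if __name__ == "__main__":
    _demo_circular_queue()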
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
from __future__ import annotations
def ohms_law( voltage : float , current : float , resistance : float ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
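# --- Editor's illustrative usage (assumption: not part of the original module) ---
if __name__ == "__main__":
    # Pass 0 for exactly one quantity to solve for it.
    assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}
    assert ohms_law(voltage=0, current=2, resistance=5) == {"voltage": 10.0}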
| 35 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_snake_case : Optional[Any] = 16
_snake_case : Dict = 32
def bamb( x ):
    """simple docstring"""
    return int(x / 2**20 )
class TorchTracemalloc :
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self, *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders( accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 , ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        "glue" , "mrpc" , split={"train": f'''train[:{n_train}]''', "validation": f'''validation[:{n_val}]'''} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function( config , args ):
    """simple docstring"""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'''epoch-{epoch}'''] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
            json.dump(train_total_peak_memory , f )
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--peak_memory_upper_bound" , type=float , default=None , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
    parser.add_argument(
        "--n_train" , type=int , default=320 , help="Number of training examples to use." , )
    parser.add_argument(
        "--n_val" , type=int , default=160 , help="Number of validation examples to use." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 693 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer ( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer ( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
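# Illustrative walk-through of the span selection above (added commentary, not
# part of the original sample): with max_answer_length = 2 over a 3-token
# passage, the candidate spans are (0,0), (0,1), (1,1), (1,2), (2,2); each
# candidate is scored as start_logits[i] + end_logits[j], the list is sorted by
# score in descending order, and any candidate that contains, or is contained
# in, an already-chosen interval is skipped, so the returned intervals are
# mutually disjoint.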
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
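# Hypothetical usage of the reader tokenizer defined above (class and
# checkpoint names are assumptions for illustration, not taken from this
# sample):
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?", titles=["Haddaway"], texts=["..."], return_tensors="pt")
# encoded["input_ids"] then holds one row per passage, laid out as
# [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>, as documented above.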
| 35 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
a__ : Optional[int] = logging.getLogger()
def __lowerCamelCase ( ) ->Optional[Any]:
snake_case__ = argparse.ArgumentParser()
parser.add_argument('-f' )
snake_case__ = parser.parse_args()
return args.f
def __lowerCamelCase ( UpperCAmelCase_ ) ->List[Any]:
snake_case__ = {}
snake_case__ = os.path.join(A__ , 'all_results.json' )
if os.path.exists(A__ ):
with open(A__ , 'r' ) as f:
snake_case__ = json.load(A__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
def __lowerCamelCase ( ) ->Optional[Any]:
snake_case__ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
a__ : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __snake_case ( _UpperCAmelCase ):
@classmethod
def _snake_case ( cls ) -> Union[str, Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
snake_case__ = tempfile.mkdtemp()
snake_case__ = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
snake_case__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _snake_case ( cls ) -> Optional[Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> List[str]:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> Any:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case__ = 7 if get_gpu_count() > 1 else 2
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> Optional[Any]:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> List[Any]:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> Dict:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'translation_no_trainer' ) ) )
@slow
def _snake_case ( self ) -> Optional[int]:
snake_case__ = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _snake_case ( self ) -> Any:
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
snake_case__ = get_results(_lowercase )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , 'image_classification_no_trainer' ) ) )
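# Common pattern shared by the tests above (illustrative sketch; the script
# path and metric name are stand-ins): compose a CLI argument list, run it
# through `accelerate launch`, then read the metrics the example script wrote
# to all_results.json:
#   run_command(self._launch_args + testargs)
#   result = get_results(tmp_dir)
#   self.assertGreaterEqual(result["eval_accuracy"], 0.75)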
| 368 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
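# Worked example (added for illustration): for num = 13 the loop above factors
# num - 1 = 12 as 2**2 * 3, i.e. s = 3 and t = 2; each of the five random bases
# is then tested via pow(base, s, num) followed by repeated squaring, and the
# function returns False as soon as a base witnesses compositeness.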
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print('Prime number:', num)
print('is_prime_low_num:', is_prime_low_num(num))
| 35 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : int=32 * 8 , UpperCAmelCase__ : str=32 * 8 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Optional[int]=64 , ) ->int:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_size
UpperCAmelCase_ = max_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Tuple:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowercase )
UpperCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowercase )
UpperCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowercase ) > 0.5
).float()
UpperCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowercase ) > 0.5).long()
UpperCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : Dict ) ->Any:
UpperCAmelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCAmelCase_ = self.num_queries
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = [1, 1, 1, 1]
UpperCAmelCase_ = self.num_channels
UpperCAmelCase_ = 64
UpperCAmelCase_ = 128
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
return config
def lowerCAmelCase__ ( self : List[str] ) ->Any:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) ->int:
UpperCAmelCase_ = output.encoder_hidden_states
UpperCAmelCase_ = output.pixel_decoder_hidden_states
UpperCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , config.decoder_layers )
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any=False ) ->Union[str, Any]:
with torch.no_grad():
UpperCAmelCase_ = MaskaFormerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCAmelCase_ = model(pixel_values=_lowercase , pixel_mask=_lowercase )
UpperCAmelCase_ = model(_lowercase , output_hidden_states=_lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowercase , _lowercase )
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] ) ->Any:
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
def comm_check_on_output(UpperCAmelCase__ : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=_lowercase , pixel_mask=_lowercase )
UpperCAmelCase_ = model(_lowercase )
comm_check_on_output(_lowercase )
UpperCAmelCase_ = model(
pixel_values=_lowercase , pixel_mask=_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
comm_check_on_output(_lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase__ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
UpperCAmelCase_ = MaskaFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowerCAmelCase__ ( self : str ) ->List[str]:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowercase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowerCAmelCase__ ( self : int ) ->List[str]:
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowerCAmelCase__ ( self : List[str] ) ->int:
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
pass
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@slow
def lowerCAmelCase__ ( self : Dict ) ->Dict:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__ ( self : Any ) ->Any:
UpperCAmelCase_ = (self.model_tester.min_size,) * 2
UpperCAmelCase_ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowercase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowercase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowercase ).long(),
}
UpperCAmelCase_ = self.model_tester.get_config()
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(_lowercase ).to(_lowercase )
UpperCAmelCase_ = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase ).to(_lowercase )
UpperCAmelCase_ = model(**_lowercase , output_attentions=_lowercase )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.train()
UpperCAmelCase_ = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase ).loss
loss.backward()
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_lowercase ).to(_lowercase )
model.train()
UpperCAmelCase_ = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
UpperCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ : Optional[int] = 1e-4
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Any ) ->int:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->Dict:
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowercase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**_lowercase )
UpperCAmelCase_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
UpperCAmelCase_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
UpperCAmelCase_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowerCAmelCase__ ( self : int ) ->List[Any]:
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**_lowercase )
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
UpperCAmelCase_ = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
UpperCAmelCase_ = inputs['''pixel_values'''].to(_lowercase )
UpperCAmelCase_ = [el.to(_lowercase ) for el in inputs['''mask_labels''']]
UpperCAmelCase_ = [el.to(_lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase_ = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
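# Shape summary asserted by the tests above: masks_queries_logits comes out as
# (batch_size, num_queries, height // 4, width // 4) because of the encoder
# compression, and class_queries_logits as (batch_size, num_queries,
# num_labels + 1), the extra column being the "no object" class.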
| 390 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
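# Quick sanity values for the sigmoid (illustrative): sigmoid(0) = 0.5,
# sigmoid(z) -> 1 as z -> +inf and -> 0 as z -> -inf; applied to
# np.dot(x, theta) it maps raw scores to probabilities in (0, 1).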
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
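# The update performed above is plain batch gradient descent on the
# cross-entropy loss: theta <- theta - alpha * x.T @ (h - y) / y.size, where
# h = sigmoid(x @ theta); the loss j is printed every 100 iterations.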
# In[68]:
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
__lowercase :List[str] = '''SpeechT5FeatureExtractor'''
__lowercase :Optional[Any] = '''SpeechT5Tokenizer'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('''audio''' , _lowercase )
lowerCamelCase_ = kwargs.pop('''text''' , _lowercase )
lowerCamelCase_ = kwargs.pop('''text_target''' , _lowercase )
lowerCamelCase_ = kwargs.pop('''audio_target''' , _lowercase )
lowerCamelCase_ = kwargs.pop('''sampling_rate''' , _lowercase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
lowerCamelCase_ = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
elif text is not None:
lowerCamelCase_ = self.tokenizer(_lowercase , **_lowercase )
else:
lowerCamelCase_ = None
if audio_target is not None:
lowerCamelCase_ = self.feature_extractor(audio_target=_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
lowerCamelCase_ = targets['''input_values''']
elif text_target is not None:
lowerCamelCase_ = self.tokenizer(_lowercase , **_lowercase )
lowerCamelCase_ = targets['''input_ids''']
else:
lowerCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase_ = labels
lowerCamelCase_ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
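# Routing summary for the __call__ above: `audio`/`text` go through the
# feature extractor / tokenizer to build the encoder inputs, while
# `audio_target`/`text_target` produce `labels` (plus an optional
# `decoder_attention_mask`); passing both members of either pair raises a
# ValueError.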
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = kwargs.pop('''input_values''' , _lowercase )
lowerCamelCase_ = kwargs.pop('''input_ids''' , _lowercase )
lowerCamelCase_ = kwargs.pop('''labels''' , _lowercase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
lowerCamelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
elif input_ids is not None:
lowerCamelCase_ = self.tokenizer.pad(_lowercase , **_lowercase )
else:
lowerCamelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowercase , _lowercase ) and "input_ids" in labels[0]):
lowerCamelCase_ = self.tokenizer.pad(_lowercase , **_lowercase )
lowerCamelCase_ = targets['''input_ids''']
else:
lowerCamelCase_ = self.feature_extractor.feature_size
lowerCamelCase_ = self.feature_extractor.num_mel_bins
lowerCamelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
lowerCamelCase_ = feature_size_hack
lowerCamelCase_ = targets['''input_values''']
else:
lowerCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase_ = labels
lowerCamelCase_ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase ) | 142 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _UpperCAmelCase ( unittest.TestCase):
__a : List[Any] = inspect.getfile(accelerate.test_utils)
__a : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_cli.py"""])
__a : Any = ['''accelerate''', '''launch''']
__a : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__a : Optional[int] = '''default_config.yaml'''
__a : Optional[Any] = config_folder / config_file
__a : Optional[Any] = config_folder / '''_default_config.yaml'''
__a : Optional[Any] = Path("""tests/test_configs""")
@classmethod
def __snake_case ( cls ) -> Tuple:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __snake_case ( cls ) -> Optional[Any]:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def __snake_case ( self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ["""--config_file""", str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() )
class _UpperCAmelCase ( unittest.TestCase):
__a : str = '''test-tpu'''
__a : Tuple = '''us-central1-a'''
__a : Optional[int] = '''ls'''
__a : Dict = ['''accelerate''', '''tpu-config''']
__a : Tuple = '''cd /usr/share'''
__a : List[Any] = '''tests/test_samples/test_command_file.sh'''
__a : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowercase , )
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowercase , )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=_lowercase )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , _lowercase , )
def __snake_case ( self ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowercase , )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : str = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all''' , _lowercase , )
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , _lowercase , )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all''' , _lowercase , )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all''' , _lowercase , )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all''' , _lowercase , )
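# Every assertion above checks that `accelerate tpu-config` logs a command of
# this shape (pieces taken from the class attributes):
#   Running gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a
#   --command cd /usr/share; <one or more commands> --worker all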
| 238 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
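# Example invocation (script name and all paths are hypothetical):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/rembert/model.ckpt \
#     --rembert_config_file /tmp/rembert/config.json \
#     --pytorch_dump_path /tmp/rembert/pytorch_model.bin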
| 35 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a :
'''simple docstring'''
@staticmethod
def lowerCamelCase_ ( *__snake_case : Optional[int] , **__snake_case : Optional[Any] ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCamelCase_ ( self : Optional[int] , __snake_case : Any , __snake_case : Dict , __snake_case : str ):
UpperCAmelCase_ = ObjectDetectionPipeline(model=_lowercase , image_processor=_lowercase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCamelCase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str ):
UpperCAmelCase_ = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
import datasets
UpperCAmelCase_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
UpperCAmelCase_ = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
UpperCAmelCase_ = object_detector(_lowercase , threshold=0.0 )
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for outputs in batch_outputs:
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def lowerCamelCase_ ( self : int ):
pass
@require_torch
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
UpperCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_lowercase )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase )
UpperCAmelCase_ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
UpperCAmelCase_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
] , )
UpperCAmelCase_ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = '''facebook/detr-resnet-50'''
UpperCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_lowercase )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase )
UpperCAmelCase_ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
UpperCAmelCase_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
UpperCAmelCase_ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = '''facebook/detr-resnet-50'''
UpperCAmelCase_ = pipeline('''object-detection''' , model=_lowercase )
UpperCAmelCase_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
def lowerCamelCase_ ( self : Any ):
        threshold = 0.9_985
        model_id = '''facebook/detr-resnet-50'''
        object_detector = pipeline('''object-detection''' , model=model_id )
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=threshold )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCamelCase_ ( self : str ):
        model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
        threshold = 0.9_993
        object_detector = pipeline('''object-detection''' , model=model_id , threshold=threshold )
        outputs = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
] , )
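# A minimal usage sketch of the pipeline exercised by the tests above. The model
# name and the threshold kwarg come from the test bodies; everything else is
# illustrative and not part of the test suite.
if __name__ == "__main__":
    detector = pipeline('''object-detection''' , model='''facebook/detr-resnet-50''' )
    for pred in detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.9 ):
        # each prediction carries a score, a label and an xmin/ymin/xmax/ymax box
        print(pred['''label'''] , pred['''score'''] , pred['''box'''] )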
| 144 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
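# Hedged usage sketch for the pipeline re-exported above; the "openai/shap-e"
# checkpoint name and the call arguments are assumptions based on the public
# diffusers examples, not something this __init__ file defines.
if __name__ == "__main__":
    from diffusers import ShapEPipeline
    from diffusers.utils import export_to_gif
    pipe = ShapEPipeline.from_pretrained("openai/shap-e")
    # each prompt yields a list of rendered frames that can be stitched into a gif
    frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images
    export_to_gif(frames[0], "shark_3d.gif")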
| 27 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
        return self.features[_lowercase]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
        return self.features[_lowercase]
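# Self-contained illustration of the sub-token label alignment used in
# convert_examples_to_features above: only the first piece of a word keeps the
# real label id, the remaining pieces get the ignored pad_token_label_id.
# (The tokenization ["Johan", "##son"] is hypothetical.)
def _align_labels(word_tokens , label , label_map , pad_token_label_id=-100 ):
    return [label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1)
assert _align_labels(['''Johan''', '''##son'''] , '''B-PER''' , {'''B-PER''': 3} ) == [3, -100]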
| 35 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__a : Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
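# Example invocation of the script above (all paths are placeholders):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/rembert/model.ckpt \
#     --rembert_config_file /path/to/rembert/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin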
| 637 |
import os
def solution( filename = "matrix.txt" ) -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    # the first row and first column can only be entered from one direction
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    # every other cell takes the cheaper of its top and left neighbours
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
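# Tiny in-memory check of the same recurrence (the Project Euler task itself
# reads the 80x80 grid from matrix.txt): for [[1, 3], [2, 4]] the cheapest
# right/down path is 1 -> 2 -> 4 with cost 7.
def _min_path_sum(grid ):
    n = len(grid )
    dp = [row[:] for row in grid]
    for i in range(n ):
        for j in range(n ):
            if i == 0 and j == 0:
                continue
            top = dp[i - 1][j] if i > 0 else float('''inf''' )
            left = dp[i][j - 1] if j > 0 else float('''inf''' )
            dp[i][j] += min(top , left )
    return dp[-1][-1]
assert _min_path_sum([[1, 3], [2, 4]] ) == 7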
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 0 |
from __future__ import annotations
def fractional_knapsack( value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # try items in order of decreasing value-per-unit-weight
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
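# Worked example: with capacity 50, items of value [60, 100, 120] and weight
# [10, 20, 30] give the classic greedy optimum 240.0, taking items 0 and 1
# whole and 2/3 of item 2.
assert fractional_knapsack([60, 100, 120] , [10, 20, 30] , 50 ) == (240.0, [1, 1, 2 / 3])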
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
from math import factorial
def solution( n = 2_0 ) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
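# Sanity check of the closed form above: the number of lattice paths through an
# n x n grid is the central binomial coefficient C(2n, n); for n = 2 that is 6.
assert solution(2 ) == 6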
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
lowerCamelCase__ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
lowerCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCamelCase ( _UpperCAmelCase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : str ,_lowerCAmelCase : Dict ,_lowerCAmelCase : Optional[Any] ,_lowerCAmelCase : Union[str, Any]="<|endoftext|>" ,_lowerCAmelCase : Optional[int]="<|endoftext|>" ,_lowerCAmelCase : Optional[Any]="<|startoftext|>" ,_lowerCAmelCase : Any="<|endoftext|>" ,_lowerCAmelCase : Optional[int]=False ,**_lowerCAmelCase : Optional[Any] ,):
"""simple docstring"""
super().__init__(
unk_token=_lowercase ,pad_token=_lowercase ,bos_token=_lowercase ,eos_token=_lowercase ,do_clean_text=_lowercase ,**_lowercase ,)
if not os.path.isfile(_lowercase ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(_lowercase ):
            raise ValueError(
                F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
__snake_case = do_clean_text
__snake_case = load_vocab_and_emoji(_lowercase ,_lowercase )
__snake_case = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return len(self.raw_vocab )
def UpperCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def UpperCamelCase_ ( self : List[Any] ,_lowerCAmelCase : Dict ):
"""simple docstring"""
return self.subword_tokenizer.tokenize(_lowercase ,clean=self.do_clean_text )
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : List[Any] ):
"""simple docstring"""
return self.vocab.get(_lowercase ,self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : int ):
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_lowercase )
def UpperCamelCase_ ( self : Union[str, Any] ,_lowerCAmelCase : Any ):
"""simple docstring"""
__snake_case = ''''''.join(_lowercase ).strip()
return out_string
def UpperCamelCase_ ( self : Dict ,_lowerCAmelCase : "Conversation" ):
"""simple docstring"""
__snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowercase ,add_special_tokens=_lowercase ) + [self.eos_token_id] )
if len(_lowercase ) > self.model_max_length:
__snake_case = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : str ,_lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
__snake_case = 0
if os.path.isdir(_lowercase ):
__snake_case = os.path.join(
_lowercase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__snake_case = os.path.join(
_lowercase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
__snake_case = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
__snake_case = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_lowercase ,"w" ,encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
__snake_case = token_index
writer.write(",".join(_lowercase ) + "\n" )
index += 1
with open(_lowercase ,"w" ,encoding="utf-8" ) as writer:
json.dump(self.emoji ,_lowercase )
return vocab_file, emoji_file
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self : Optional[int] ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : int ,_lowerCAmelCase : Dict ):
"""simple docstring"""
__snake_case = vocab # same as swe
__snake_case = ids_to_tokens # same as bpe
__snake_case = emoji
__snake_case = np.max([len(_lowercase ) for w in self.vocab.keys()] )
__snake_case = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
__snake_case = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
__snake_case = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
__snake_case = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
__snake_case = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
__snake_case = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
__snake_case = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
__snake_case = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
__snake_case = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.ids_to_tokens )
def UpperCamelCase_ ( self : Any ,_lowerCAmelCase : int ):
"""simple docstring"""
__snake_case = self.content_repattera.sub("<URL>" ,_lowercase )
__snake_case = self.content_repattera.sub("<EMAIL>" ,_lowercase )
__snake_case = self.content_repattera.sub("<TEL>" ,_lowercase )
__snake_case = self.content_repattera.sub("<DATE>" ,_lowercase )
__snake_case = self.content_repattera.sub("<DATE>" ,_lowercase )
__snake_case = self.content_repattera.sub("<PRICE>" ,_lowercase )
__snake_case = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__snake_case = content.replace("<BLOCK><BLOCK>" ,"<BLOCK>" )
return content
def UpperCamelCase_ ( self : Optional[int] ,_lowerCAmelCase : Tuple ,_lowerCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
__snake_case = text.replace(" " ,"<SP>" )
__snake_case = text.replace(" " ,"<SP>" )
__snake_case = text.replace("\r\n" ,"<BR>" )
__snake_case = text.replace("\n" ,"<BR>" )
__snake_case = text.replace("\r" ,"<BR>" )
__snake_case = text.replace("\t" ,"<TAB>" )
__snake_case = text.replace("—" ,"ー" )
__snake_case = text.replace("−" ,"ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
__snake_case = text.replace(_lowercase ,_lowercase )
if clean:
__snake_case = self.clean_text(_lowercase )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc2a1 and c <= 0Xc2bf)
                    or (c >= 0Xc780 and c <= 0Xc783)
                    or (c >= 0Xcab9 and c <= 0Xcbbf)
                    or (c >= 0Xcc80 and c <= 0Xcda2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe2_8080 and c <= 0Xe2_b07f:
                    return True
            return False
__snake_case = 0
__snake_case = []
while pos < len(_lowercase ):
__snake_case = min(len(_lowercase ) ,pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
__snake_case = [] # (token_id, token, pos)
for e in range(_lowercase ,_lowercase ,-1 ):
__snake_case = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_lowercase ) > 2:
__snake_case = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_lowercase ) > 0:
# the smallest token_id is adopted
                __snake_case = sorted(_lowercase ,key=lambda x : x[0] )[0]
result.append(_lowercase )
__snake_case = e
else:
__snake_case = pos + 1
__snake_case = text[pos:end]
if check_simbol(_lowercase ):
result.append("<KIGOU>" )
elif checkuae(_lowercase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
__snake_case = end
return result
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : Dict ,_lowerCAmelCase : int="\n" ):
"""simple docstring"""
__snake_case = []
__snake_case = []
__snake_case = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_lowercase ) > 0:
words.append(bytearray(_lowercase ).decode("utf-8" ,errors="replace" ) )
__snake_case = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(_lowercase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(_lowercase )
if len(_lowercase ) > 0:
words.append(bytearray(_lowercase ).decode("utf-8" ,errors="replace" ) )
__snake_case = ''''''.join(_lowercase )
return text
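# Hedged usage sketch (the checkpoint name comes from the pretrained maps above;
# the exact ids are illustrative and depend on the released vocabulary):
if __name__ == "__main__":
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    ids = tokenizer.encode("人とAIが協調するためには")
    print(ids)
    print(tokenizer.decode(ids))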
| 524 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage( image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
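# The helper above fingerprints a PIL image so that large pipeline outputs can
# be compared by digest, e.g. hashimage(Image.new("RGB", (2, 2))) is a stable
# 32-character hex string.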
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def lowercase__ ( self : List[Any] , model , tokenizer , processor ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def lowercase__ ( self : Union[str, Any] , depth_estimator , examples ):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 58 |
def a ( A__ ) -> int:
'''simple docstring'''
    if A__ < 0:
raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(A__ , float ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(A__ ).count('''1''' )
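# Quick examples for the bit-count helper above: 25 is 0b11001, so three set bits.
assert a(25 ) == 3
assert a(0 ) == 0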
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_snake_case : str = random.Random()
if is_torch_available():
import torch
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def __init__( self, parent, batch_size=7, min_seq_length=4_00, max_seq_length=20_00, feature_size=1, padding_value=0.0, sampling_rate=1_60_00, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
def __lowerCAmelCase ( self ) -> Dict:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self, _a=False, _a=False ) -> Optional[int]:
        def _flatten(_a ):
            return list(itertools.chain(*_a ) )
if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
            speech_inputs = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =ASTFeatureExtractor
def __lowerCAmelCase ( self ) -> List[Any]:
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def __lowerCAmelCase ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )]
__SCREAMING_SNAKE_CASE = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test not batched input
__SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0], return_tensors="np" ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0], return_tensors="np" ).input_values
self.assertTrue(np.allclose(_lowercase, _lowercase, atol=1E-3 ) )
# Test batched
__SCREAMING_SNAKE_CASE = feat_extract(_lowercase, padding=_lowercase, return_tensors="np" ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(_lowercase, padding=_lowercase, return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowercase, _lowercase ):
self.assertTrue(np.allclose(_lowercase, _lowercase, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__SCREAMING_SNAKE_CASE = np.asarray(_lowercase )
__SCREAMING_SNAKE_CASE = feat_extract(_lowercase, return_tensors="np" ).input_values
__SCREAMING_SNAKE_CASE = feat_extract(_lowercase, return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowercase, _lowercase ):
self.assertTrue(np.allclose(_lowercase, _lowercase, atol=1E-3 ) )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = np.random.rand(1_00 ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _load_datasamples( self, num_samples ):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
@require_torch
def __lowerCAmelCase ( self ) -> Optional[int]:
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt" ).input_values
self.assertEquals(input_values.shape, (1, 10_24, 1_28) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4 ) )
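# Minimal standalone sketch of the extractor exercised above (output shape
# follows the integration test: 1024 frames x 128 mel bins):
if __name__ == "__main__":
    audio = np.zeros(16_000, dtype=np.float32 )  # one second of silence at 16 kHz
    feats = ASTFeatureExtractor()(audio, sampling_rate=16_000, return_tensors="np" ).input_values
    print(feats.shape )  # (1, 1024, 128)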
| 693 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
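# Numeric illustration of the helper above (assumed example values): for a
# 480x640 input with output_size (384, 384), keep_aspect_ratio=True and
# multiple=32, the height scale 0.8 is closer to 1 than the width scale 0.6, so
# both sides reuse 0.8 and the result is (384, 512), each a multiple of 32.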
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a__ : Any = logging.get_logger(__name__)
class __snake_case ( _UpperCAmelCase ):
__lowerCAmelCase = ['''input_features''']
    def __init__( self , feature_size=80 , sampling_rate=1_6000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def _np_extract_fbank_features( self , waveform ):
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        # clamp the dynamic range to 8 dB below the peak, then rescale to roughly [-1, 1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "max_length" , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Any:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
snake_case__ = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
snake_case__ = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case__ = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
snake_case__ = np.asarray(_lowercase , dtype=np.float32 )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
snake_case__ = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
snake_case__ = [np.asarray([raw_speech] ).T]
snake_case__ = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
snake_case__ = self.pad(
_lowercase , padding=_lowercase , max_length=max_length if max_length else self.n_samples , truncation=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
snake_case__ = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
snake_case__ = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
snake_case__ = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
snake_case__ = [self._np_extract_fbank_features(_lowercase ) for waveform in input_features[0]]
if isinstance(input_features[0] , _lowercase ):
snake_case__ = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_features]
else:
snake_case__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
snake_case__ = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
snake_case__ = padded_inputs.convert_to_tensors(_lowercase )
return padded_inputs
def _snake_case ( self ) -> int:
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
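# --- Added usage sketch (not part of the original sample) ---------------------
# How an extractor like the class above is typically driven. The class name
# `WhisperFeatureExtractor` is an assumption (the name is obfuscated in this
# dump); the expected output shape follows from the defaults in __init__ above
# (80 mel bins, 16 kHz sampling, 30 s chunks -> 3000 frames at hop length 160).
#
#     import numpy as np
#
#     extractor = WhisperFeatureExtractor()
#     waveform = np.zeros(16_000 * 2, dtype=np.float32)  # 2 s of silent mono audio
#     feats = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#     print(feats["input_features"].shape)               # (1, 80, 3000)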
| 368 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
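# --- Added usage sketch (not part of the original sample) ---------------------
# The class above is a Borůvka minimum-spanning-tree implementation whose class
# and method names are obfuscated in this dump. Under the assumed original
# names (Graph, add_edge, boruvka), usage would look like:
#
#     g = Graph(4)            # 4 nodes, labelled 0..3
#     g.add_edge(0, 1, 10)
#     g.add_edge(0, 2, 6)
#     g.add_edge(0, 3, 5)
#     g.add_edge(1, 3, 15)
#     g.add_edge(2, 3, 4)
#     g.boruvka()             # prints each edge added and the total MST weight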
| 35 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase__ : Optional[Any] = 'src/diffusers'
lowercase__ : str = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowercase__ : List[Any] = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase__ : List[str] = spec.loader.load_module()
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ):
'''simple docstring'''
return line.startswith(A__ ) or len(A__ ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , A__ ) is not None
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = object_name.split('''.''' )
UpperCAmelCase_ = 0
# First let's find the module where our object lives.
UpperCAmelCase_ = parts[i]
while i < len(A__ ) and not os.path.isfile(os.path.join(A__ , F"""{module}.py""" ) ):
i += 1
if i < len(A__ ):
UpperCAmelCase_ = os.path.join(A__ , parts[i] )
if i >= len(A__ ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(A__ , F"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ = f.readlines()
# Now let's find the class / func in the code!
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(A__ ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(A__ ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCAmelCase_ = line_index
while line_index < len(A__ ) and _should_continue(lines[line_index] , A__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_ = lines[start_index:line_index]
return "".join(A__ )
lowercase__ : Tuple = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
lowercase__ : Dict = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
lowercase__ : Optional[int] = re.compile(R"<FILL\s+[^>]*>")
def __lowerCamelCase ( _UpperCamelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = code.split('''\n''' )
UpperCAmelCase_ = 0
while idx < len(A__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(A__ ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = len(get_indent(A__ ) ) > 0
if has_indent:
UpperCAmelCase_ = F"""class Bla:\n{code}"""
UpperCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=A__ )
UpperCAmelCase_ = black.format_str(A__ , mode=A__ )
UpperCAmelCase_ = style_docstrings_in_code(A__ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int]=False ):
'''simple docstring'''
with open(A__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(A__ ):
UpperCAmelCase_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCAmelCase_ = search.groups()
UpperCAmelCase_ = find_code_in_diffusers(A__ )
UpperCAmelCase_ = get_indent(A__ )
UpperCAmelCase_ = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCAmelCase_ = theoretical_indent
UpperCAmelCase_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCAmelCase_ = True
while line_index < len(A__ ) and should_continue:
line_index += 1
if line_index >= len(A__ ):
break
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _should_continue(A__ , A__ ) and re.search(F"""^{indent}# End copy""" , A__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase_ = lines[start_index:line_index]
UpperCAmelCase_ = ''''''.join(A__ )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCAmelCase_ = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(A__ ) is None]
UpperCAmelCase_ = '''\n'''.join(A__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(A__ ) > 0:
UpperCAmelCase_ = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
UpperCAmelCase_ = [_re_replace_pattern.search(A__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCAmelCase_ = pattern.groups()
UpperCAmelCase_ = re.sub(A__ , A__ , A__ )
if option.strip() == "all-casing":
UpperCAmelCase_ = re.sub(obja.lower() , obja.lower() , A__ )
UpperCAmelCase_ = re.sub(obja.upper() , obja.upper() , A__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCAmelCase_ = blackify(lines[start_index - 1] + theoretical_code )
UpperCAmelCase_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCAmelCase_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCAmelCase_ = start_index + 1
if overwrite and len(A__ ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(A__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A__ )
return diffs
def __lowerCamelCase ( _UpperCamelCase : int = False ):
'''simple docstring'''
UpperCAmelCase_ = glob.glob(os.path.join(A__ , '''**/*.py''' ) , recursive=A__ )
UpperCAmelCase_ = []
for filename in all_files:
UpperCAmelCase_ = is_copy_consistent(A__ , A__ )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(A__ ) > 0:
UpperCAmelCase_ = '''\n'''.join(A__ )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ : List[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
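# --- Added illustration (not part of the original script) ----------------------
# The marker this checker validates, shown on a hypothetical class. The target
# path must point at real diffusers code; the optional `old->new` suffix drives
# the rename patterns handled above.
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#     class MyBlock(nn.Module):
#         ...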
| 390 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCamelCase_ ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def lowerCamelCase_ ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def lowerCamelCase_ ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError ):  # offline mode raises a ConnectionError subclass
http_head('''https://huggingface.co''' ) | 142 |
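# --- Added sketch (not part of the original tests): one simple way an offline
# simulator like `offline(...)` can be built - patch requests.Session.request to
# fail immediately. A generic illustration, not datasets' actual implementation.
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def fake_offline():
    def _refuse(self, *args, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.')

    with patch('requests.Session.request', _refuse):
        yield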
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
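# --- Added example (not in the original sample): round-trip check of the two
# helpers above. Guarded so it only runs when the module is executed directly.
if __name__ == "__main__":
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"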
| 35 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
_UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger'''
_UpperCAmelCase : Union[str, Any] = jax.device_count()
_UpperCAmelCase : Optional[int] = num_samples * [prompt]
_UpperCAmelCase : Optional[int] = sd_pipe.prepare_inputs(_lowercase )
_UpperCAmelCase : int = replicate(_lowercase )
_UpperCAmelCase : str = shard(_lowercase )
_UpperCAmelCase : List[Any] = jax.random.PRNGKey(0 )
_UpperCAmelCase : Any = jax.random.split(_lowercase , jax.device_count() )
_UpperCAmelCase : List[str] = sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_UpperCAmelCase : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase : Union[str, Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
_UpperCAmelCase : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase : List[str] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def __snake_case ( self ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = '''stabilityai/stable-diffusion-2'''
_UpperCAmelCase : Union[str, Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowercase , subfolder="""scheduler""" )
_UpperCAmelCase : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , revision="""bf16""" , dtype=jnp.bfloat16 , )
_UpperCAmelCase : Optional[Any] = scheduler_params
_UpperCAmelCase : List[Any] = '''A painting of a squirrel eating a burger'''
_UpperCAmelCase : List[Any] = jax.device_count()
_UpperCAmelCase : str = num_samples * [prompt]
_UpperCAmelCase : List[Any] = sd_pipe.prepare_inputs(_lowercase )
_UpperCAmelCase : str = replicate(_lowercase )
_UpperCAmelCase : List[Any] = shard(_lowercase )
_UpperCAmelCase : Any = jax.random.PRNGKey(0 )
_UpperCAmelCase : int = jax.random.split(_lowercase , jax.device_count() )
_UpperCAmelCase : int = sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
_UpperCAmelCase : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase : str = images[0, 2_53:2_56, 2_53:2_56, -1]
_UpperCAmelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase : List[Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
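# --- Added sketch (not part of the original tests): the replicate/shard
# data-parallel pattern used above, reduced to its core. Wrapped in a function
# so nothing runs at import time.
def _replicate_shard_demo():
    params = {'w': jnp.ones((4,))}
    params = replicate(params)                      # one copy of the params per device
    batch = jnp.zeros((jax.device_count() * 2, 4))
    batch = shard(batch)                            # leading axis split across devices
    return jax.tree_util.tree_map(lambda x: x.shape, (params, batch))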
| 238 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 0 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('theta: ', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
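# --- Added sanity check (not in the original script): quick numeric spot-check
# of the helpers above; expected values are standard logistic/cross-entropy
# numbers. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    print(sigmoid_function(np.array([0.0, 2.0, -2.0])))  # ~ [0.5, 0.881, 0.119]
    print(round(cost_function(np.array([0.5, 0.9]), np.array([0, 1])), 4))  # 0.3993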
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
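# --- Added sketch (not part of the original file): the idea behind _LazyModule,
# reduced to PEP 562's module-level __getattr__. Attribute access triggers the
# real import on first use. Illustration only - _LazyModule already does this.
#
#     import importlib
#
#     def __getattr__(name):
#         for module, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module('.' + module, __name__), name)
#         raise AttributeError(name)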
| 35 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
__A : Optional[int] = logging.get_logger(__name__)
__A : List[Any] = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split('.' ):
_A = getattr(A__ , A__ )
if weight_type is not None:
_A = getattr(A__ , A__ ).shape
else:
_A = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
else:
_A = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = []
_A = fairseq_model.state_dict()
_A = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_A = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , )
_A = True
else:
for key, mapped_key in MAPPING.items():
_A = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_A = True
if "*" in mapped_key:
_A = name.split(A__ )[0].split('.' )[-2]
_A = mapped_key.replace('*' , A__ )
if "weight_g" in name:
_A = '''weight_g'''
elif "weight_v" in name:
_A = '''weight_v'''
elif "weight" in name:
_A = '''weight'''
elif "bias" in name:
_A = '''bias'''
else:
_A = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = full_name.split('conv_layers.' )[-1]
_A = name.split('.' )
_A = int(items[0] )
_A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_A = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_A = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_A = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_A = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(A__ )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_A = SEWConfig()
if is_finetuned:
_A = model.wav_encoder.wav_model.cfg
else:
_A = model.cfg
_A = fs_config.conv_bias
_A = eval(fs_config.conv_feature_layers )
_A = [x[0] for x in conv_layers]
_A = [x[1] for x in conv_layers]
_A = [x[2] for x in conv_layers]
_A = '''gelu'''
_A = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
_A = 0.0
_A = fs_config.activation_fn.name
_A = fs_config.encoder_embed_dim
_A = 0.02
_A = fs_config.encoder_ffn_embed_dim
_A = 1e-5
_A = fs_config.encoder_layerdrop
_A = fs_config.encoder_attention_heads
_A = fs_config.conv_pos_groups
_A = fs_config.conv_pos
_A = len(A__ )
_A = fs_config.encoder_layers
_A = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_A = model.cfg
_A = fs_config.final_dropout
_A = fs_config.layerdrop
_A = fs_config.activation_dropout
_A = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_A = fs_config.attention_dropout
_A = fs_config.dropout_input
_A = fs_config.dropout
_A = fs_config.mask_channel_length
_A = fs_config.mask_channel_prob
_A = fs_config.mask_length
_A = fs_config.mask_prob
_A = '''Wav2Vec2FeatureExtractor'''
_A = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True ) -> Dict:
"""simple docstring"""
if is_finetuned:
_A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_A = SEWConfig.from_pretrained(A__ )
else:
_A = convert_config(model[0] , A__ )
_A = model[0].eval()
_A = True if config.feat_extract_norm == '''layer''' else False
_A = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
if is_finetuned:
if dict_path:
_A = Dictionary.load(A__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.pad_index
_A = target_dict.bos_index
_A = target_dict.eos_index
_A = len(target_dict.symbols )
_A = os.path.join(A__ , 'vocab.json' )
if not os.path.isdir(A__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A__ ) )
return
os.makedirs(A__ , exist_ok=A__ )
with open(A__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , A__ )
_A = Wav2Vec2CTCTokenizer(
A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A__ , )
_A = Wav2Vec2Processor(feature_extractor=A__ , tokenizer=A__ )
processor.save_pretrained(A__ )
_A = SEWForCTC(A__ )
else:
_A = SEWModel(A__ )
feature_extractor.save_pretrained(A__ )
recursively_load_weights(A__ , A__ , A__ )
hf_model.save_pretrained(A__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A : Union[str, Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
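# --- Added example (not part of the original script): a hypothetical invocation.
# All paths below are placeholders; the flags are the ones defined by the
# argparse block above.
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path /path/to/sew_checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./sew-hf \
#         --is_finetuned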
| 27 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : int=56 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[str]=99 , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]="gelu_new" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : str="block_sparse" , UpperCamelCase_ : str=True , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Tuple=3 , ):
"""simple docstring"""
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_attention_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_choices
__A = rescale_embeddings
__A = attention_type
__A = use_bias
__A = block_size
__A = num_random_blocks
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_attention_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
__A = self.prepare_config_and_inputs()
__A = config_and_inputs
__A = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class __lowercase ( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
__A = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
super().test_hidden_states_output()
@slow
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__A = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = self._prepare_for_class(_lowercase , _lowercase )
__A = model_class(_lowercase )
@jax.jit
def model_jitted(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : Tuple ):
return model(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
with self.subTest("""JIT Enabled""" ):
__A = model_jitted(**_lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__A = model_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : Dict="outputs" , UpperCamelCase_ : Optional[Any]=None ):
"""simple docstring"""
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
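# --- Added sketch (not part of the original tests): the jit-enabled vs
# jit-disabled comparison used above, in miniature. Wrapped in a function so
# nothing executes at import time.
def _jit_vs_eager_demo():
    import jax.numpy as jnp

    @jax.jit
    def f(x):
        return (x * 2).sum()

    x = jnp.arange(4.0)
    with jax.disable_jit():
        eager = f(x)       # runs op-by-op without compilation
    compiled = f(x)        # traced and compiled on first call
    assert eager == compiled
    return compiled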
| 637 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every single kernel, and saved as list of matrix
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expanding the data slice to One dimenssion
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
# expanding three dimension data to one dimension list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
# expanding matrix to one dimension list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
# model traning
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('''------------------Training Complished---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
# model predict
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
# return the image data after the convolution step so it can be inspected
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
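# --- Illustrative sketch (added for clarity, not part of the original class):
# per image, the training loop above does convolve -> pool -> flatten -> two
# sigmoid layers -> backpropagation. Below is a minimal stand-alone restatement
# of the two dense forward/backward steps; all names here are illustrative,
# since the class above obfuscates them.
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def _bp_step(x, vji, wkj, thre_j, thre_k, teach, rate=0.1):
    out_j = _sigmoid(x @ vji.T - thre_j)          # hidden layer
    out_k = _sigmoid(out_j @ wkj.T - thre_k)      # output layer
    pd_k = (teach - out_k) * out_k * (1 - out_k)  # output-layer delta
    pd_j = (pd_k @ wkj) * out_j * (1 - out_j)     # hidden-layer delta
    wkj = wkj + rate * np.outer(pd_k, out_j)      # weight updates (outer products)
    vji = vji + rate * np.outer(pd_j, x)
    return out_k, vji, wkj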
| 35 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__: Optional[int] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: Dict = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase__: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
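# --- Illustrative sketch of the lazy-import pattern used above (added for
# clarity; a simplified stand-in, not transformers' actual _LazyModule):
# attribute access on the module triggers the real submodule import only on
# first use, so importing the package stays cheap.
import importlib
import types

class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Resolve the owning submodule lazily, on first attribute access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")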
| 345 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
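# --- Illustrative end-user usage of TrOCR outside this test harness (added
# for clarity; the checkpoint name follows the public TrOCR release and the
# image path is a placeholder):
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
image = Image.open("line.png").convert("RGB")  # placeholder path
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])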
| 35 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase ( enum.Enum ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 2
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase ( _UpperCAmelCase ):
__UpperCamelCase = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : Optional[int] ,*_lowerCAmelCase : Optional[Any] ,**_lowerCAmelCase : Dict ):
"""simple docstring"""
super().__init__(*_lowercase ,**_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__snake_case = None
if self.model.config.prefix is not None:
__snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__snake_case = self._sanitize_parameters(prefix=_lowercase ,**self._forward_params )
__snake_case = {**self._preprocess_params, **preprocess_params}
__snake_case = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Optional[int] ,_lowerCAmelCase : Any=None ,_lowerCAmelCase : Optional[int]=None ,_lowerCAmelCase : List[Any]=None ,_lowerCAmelCase : List[Any]=None ,_lowerCAmelCase : Optional[Any]=None ,_lowerCAmelCase : Optional[int]=None ,_lowerCAmelCase : List[Any]=None ,_lowerCAmelCase : Tuple=None ,**_lowerCAmelCase : Union[str, Any] ,):
"""simple docstring"""
__snake_case = {}
if prefix is not None:
__snake_case = prefix
if prefix:
__snake_case = self.tokenizer(
_lowercase ,padding=_lowercase ,add_special_tokens=_lowercase ,return_tensors=self.framework )
__snake_case = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
" [None, \'hole\']" )
__snake_case = handle_long_generation
preprocess_params.update(_lowercase )
__snake_case = generate_kwargs
__snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
__snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
__snake_case = ReturnType.TENSORS
if return_type is not None:
__snake_case = return_type
if clean_up_tokenization_spaces is not None:
__snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
__snake_case = self.tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
__snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : Union[str, Any] ,*_lowerCAmelCase : Tuple ,**_lowerCAmelCase : Any ):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_lowercase ,**_lowercase )
def __call__( self : List[str] ,_lowerCAmelCase : Tuple ,**_lowerCAmelCase : Dict ):
"""simple docstring"""
return super().__call__(_lowercase ,**_lowercase )
def UpperCamelCase_ ( self : Optional[int] ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : int="" ,_lowerCAmelCase : int=None ,**_lowerCAmelCase : List[str] ):
"""simple docstring"""
__snake_case = self.tokenizer(
prefix + prompt_text ,padding=_lowercase ,add_special_tokens=_lowercase ,return_tensors=self.framework )
__snake_case = prompt_text
if handle_long_generation == "hole":
__snake_case = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__snake_case = generate_kwargs['''max_new_tokens''']
else:
__snake_case = generate_kwargs.get("max_length" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
__snake_case = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__snake_case = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : Any ,**_lowerCAmelCase : Dict ):
"""simple docstring"""
__snake_case = model_inputs['''input_ids''']
__snake_case = model_inputs.get("attention_mask" ,_lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__snake_case = None
__snake_case = None
__snake_case = 1
else:
__snake_case = input_ids.shape[0]
__snake_case = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__snake_case = generate_kwargs.pop("prefix_length" ,0 )
if prefix_length > 0:
__snake_case = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
__snake_case = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__snake_case = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__snake_case = self.model.generate(input_ids=_lowercase ,attention_mask=_lowercase ,**_lowercase )
__snake_case = generated_sequence.shape[0]
if self.framework == "pt":
__snake_case = generated_sequence.reshape(_lowercase ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
__snake_case = tf.reshape(_lowercase ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : Optional[int] ,_lowerCAmelCase : Any ,_lowerCAmelCase : int=ReturnType.FULL_TEXT ,_lowerCAmelCase : int=True ):
"""simple docstring"""
__snake_case = model_outputs['''generated_sequence'''][0]
__snake_case = model_outputs['''input_ids''']
__snake_case = model_outputs['''prompt_text''']
__snake_case = generated_sequence.numpy().tolist()
__snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__snake_case = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__snake_case = self.tokenizer.decode(
_lowercase ,skip_special_tokens=_lowercase ,clean_up_tokenization_spaces=_lowercase ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__snake_case = 0
else:
__snake_case = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=_lowercase ,clean_up_tokenization_spaces=_lowercase ,) )
if return_type == ReturnType.FULL_TEXT:
__snake_case = prompt_text + text[prompt_length:]
else:
__snake_case = text[prompt_length:]
__snake_case = {'''generated_text''': all_text}
records.append(_lowercase )
return records
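# --- Illustrative end-user call of the pipeline defined above (added for
# clarity; the model name is an example):
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Hello, I'm a language model,", max_new_tokens=20, num_return_sequences=2, do_sample=True
)
for out in outputs:
    print(out["generated_text"])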
| 524 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
_lowerCamelCase = None
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
_lowerCamelCase = '''train'''
_lowerCamelCase = '''dev'''
_lowerCamelCase = '''test'''
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> Tuple:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=False , _lowercase="[CLS]" , _lowercase=1 , _lowercase="[SEP]" , _lowercase=False , _lowercase=False , _lowercase=0 , _lowercase=0 , _lowercase=-1_0_0 , _lowercase=0 , _lowercase=True , ) -> int:
'''simple docstring'''
snake_case_ : Tuple = {label: i for i, label in enumerate(_lowercase )}
snake_case_ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , _lowercase , len(_lowercase ) )
snake_case_ : Tuple = []
snake_case_ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
snake_case_ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
snake_case_ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
snake_case_ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
snake_case_ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
snake_case_ : Optional[Any] = [cls_token] + tokens
snake_case_ : Tuple = [pad_token_label_id] + label_ids
snake_case_ : Tuple = [cls_token_segment_id] + segment_ids
snake_case_ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
snake_case_ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
snake_case_ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
snake_case_ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
snake_case_ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
snake_case_ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
snake_case_ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(_lowercase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(_lowercase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(_lowercase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(_lowercase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
snake_case_ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase=False , _lowercase = Split.train , ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = os.path.join(
_lowercase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case_ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
snake_case_ : Any = torch.load(_lowercase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
snake_case_ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
snake_case_ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'Saving features into cached file {cached_features_file}' )
torch.save(self.features , _lowercase )
def __len__( self ) -> str:
'''simple docstring'''
return len(self.features )
def __getitem__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = -100
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase=False , _lowercase = Split.train , ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
snake_case_ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
snake_case_ : int = tf.data.Dataset.from_generator(
_lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
snake_case_ : int = tf.data.Dataset.from_generator(
_lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return len(self.features )
def __getitem__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
return self.features[i]
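# --- Illustrative word-to-subword label alignment, the core trick inside
# convert_examples_to_features above (added for clarity; the model name is an
# example, and -100 mirrors nn.CrossEntropyLoss().ignore_index):
from transformers import AutoTokenizer

_tok = AutoTokenizer.from_pretrained("bert-base-cased")
_words, _labels = ["John", "lives", "in", "Berlin"], ["B-PER", "O", "O", "B-LOC"]
_tokens, _label_ids = [], []
for _word, _label in zip(_words, _labels):
    _pieces = _tok.tokenize(_word)
    _tokens.extend(_pieces)
    # real label on the first piece, pad_token_label_id (-100) on the rest
    _label_ids.extend([_label] + [-100] * (len(_pieces) - 1))
print(_tokens)
print(_label_ids)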
| 58 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
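# --- Readable restatement of the helper above (added for clarity; parameter
# names are illustrative, since the snippet obfuscates them):
def _ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    # Exactly one of the three quantities must be 0; that is the one solved for.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}

print(_ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}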
| 35 | 0 |
def _A ( __snake_case :int ) -> list:
"""simple docstring"""
if n_term == "":
return []
__SCREAMING_SNAKE_CASE = []
for temp in range(int(A__ ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
_snake_case : Optional[Any] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
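# --- Readable restatement of the series builder above (added for clarity;
# names are illustrative):
def _harmonic_series(n_term) -> list:
    # Builds ["1", "1/2", ..., "1/n"]; an empty input yields [].
    if n_term == "":
        return []
    return ["1" if i == 0 else f"1/{i + 1}" for i in range(int(n_term))]

print(_harmonic_series(5))  # ['1', '1/2', '1/3', '1/4', '1/5']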
| 693 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
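# --- Illustrative call of the reader tokenizer defined above (added for
# clarity; the checkpoint name is the standard facebook/dpr-reader release):
from transformers import DPRReaderTokenizer

_tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
_encoded = _tokenizer(
    questions=["What is love ?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
print(_encoded["input_ids"].shape)  # (n_passages, sequence_length)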
| 35 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a__ : Tuple = object()
# For specifying empty leaf dict `{}`
a__ : str = object()
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[int]:
snake_case__ = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(A__ ) - len(A__ ) + 1 ):
snake_case__ = [x.match(A__ ) for x, y in zip(A__ , ks[i:] )]
if matches and all(A__ ):
return True
return False
def __lowerCamelCase ( UpperCAmelCase_ ) ->Dict:
def replace(UpperCAmelCase_ , UpperCAmelCase_ ):
for rule, replacement in rules:
if _match(A__ , A__ ):
return replacement
return val
return replace
def __lowerCamelCase ( ) ->Optional[int]:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , A__ )),
(("transformer", "wte", "embedding"), P('mp' , A__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(A__ , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , A__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(A__ , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , A__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __lowerCamelCase ( UpperCAmelCase_ ) ->Optional[Any]:
snake_case__ = _get_partition_rules()
snake_case__ = _replacement_rules(A__ )
snake_case__ = {k: _unmatched for k in flatten_dict(A__ )}
snake_case__ = {k: replace(A__ , A__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(A__ ) )
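# --- Self-contained restatement of the windowed regex matcher above (added
# for clarity; the function in this snippet is obfuscated, so a readable copy):
import re

def _match_path(patterns, key) -> bool:
    """True if `patterns` matches a contiguous window of path components in `key`."""
    compiled = tuple(re.compile(p + "$") for p in patterns)
    for i in range(len(key) - len(compiled) + 1):
        window = key[i : i + len(compiled)]
        matches = [rx.match(part) for rx, part in zip(compiled, window)]
        if matches and all(matches):
            return True
    return False

print(_match_path(("attention", r"(q_proj|k_proj|v_proj)", "kernel"),
                  ("transformer", "h", "0", "attention", "q_proj", "kernel")))  # True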
| 368 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
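# --- Worked example of the (s, t) decomposition Miller-Rabin relies on
# (added for clarity): write num - 1 as (2 ** t) * s with s odd.
def _decompose(num: int) -> tuple:
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    return s, t

print(_decompose(221))  # (55, 2), since 220 = 2**2 * 55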
| 35 | 0 |
'''simple docstring'''
import baseaa
def __lowerCamelCase ( _UpperCamelCase : Optional[int] ):
'''simple docstring'''
return baseaa.baaencode(string.encode('''utf-8''' ) )
def __lowerCamelCase ( _UpperCamelCase : Optional[int] ):
'''simple docstring'''
return baseaa.baadecode(A__ ).decode('''utf-8''' )
if __name__ == "__main__":
lowercase__ : Tuple = 'Hello World!'
lowercase__ : List[str] = baseaa_encode(test)
print(encoded)
lowercase__ : Optional[int] = baseaa_decode(encoded)
print(decoded)
| 390 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
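# --- One gradient-descent step in isolation (added for clarity; mirrors the
# update inside logistic_reg: h = sigmoid(X @ theta), grad = X.T @ (h - y) / m;
# numpy is already imported above as np):
_X = np.array([[1.0, 2.0], [2.0, 1.0]])
_y = np.array([0.0, 1.0])
_theta = np.zeros(2)
_h = 1 / (1 + np.exp(-(_X @ _theta)))
_theta -= 0.1 * (_X.T @ (_h - _y)) / _y.size
print(_theta)  # [ 0.025 -0.025]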
| 35 | 0 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase_ ( _lowerCamelCase : Union[str, Any] ):
lowerCamelCase_ = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(A__ ):
for j in range(A__ ):
lowerCamelCase_ = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__lowercase : List[str] = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__lowercase : Optional[Any] = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 142 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 0 |
"""simple docstring"""
import sys
def UpperCamelCase ( _lowerCAmelCase : List[Any] ) -> Any:
_UpperCAmelCase : int = len(A__ )
_UpperCAmelCase : Tuple = [[0 for x in range(A__ )] for x in range(A__ )]
_UpperCAmelCase : str = [[0 for x in range(A__ )] for x in range(A__ )]
for chain_length in range(2, A__ ):
for a in range(1, n - chain_length + 1 ):
_UpperCAmelCase : List[str] = a + chain_length - 1
_UpperCAmelCase : Any = sys.maxsize
for c in range(A__, A__ ):
_UpperCAmelCase : Dict = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_UpperCAmelCase : Optional[int] = cost
_UpperCAmelCase : int = c
return matrix, sol
def UpperCamelCase ( _lowerCAmelCase : List[str], _lowerCAmelCase : Any, _lowerCAmelCase : Dict ) -> List[str]:
if i == j:
print("""A""" + str(A__ ), end=""" """ )
else:
print("""(""", end=""" """ )
print_optiomal_solution(A__, A__, optimal_solution[i][j] )
print_optiomal_solution(A__, optimal_solution[i][j] + 1, A__ )
print(""")""", end=""" """ )
def UpperCamelCase ( ) -> Tuple:
_UpperCAmelCase : Any = [30, 35, 15, 5, 10, 20, 25]
_UpperCAmelCase : List[str] = len(A__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_UpperCAmelCase : Any = matrix_chain_order(A__ )
print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
print_optiomal_solution(A__, 1, n - 1 )
if __name__ == "__main__":
main()
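# --- Worked result for the dimensions above (added for clarity): for
# p = [30, 35, 15, 5, 10, 20, 25], the classic optimum is 15125 scalar
# multiplications with parenthesization ((A1(A2A3))((A4A5)A6)).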
| 238 |
import argparse

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 | 0 |
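For a programmatic call instead of the CLI, something like the sketch below works; all three paths are placeholders, not files referenced by the original script:

convert_rembert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="rembert/model.ckpt",        # placeholder TF checkpoint
    rembert_config_file="rembert/config.json",      # placeholder config
    pytorch_dump_path="rembert/pytorch_model.bin",  # placeholder output path
)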
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb


logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model on the meta device, then inspect it for tied weights
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    # Check whether the model contains any `bnb.nn.Linear4bit` layer
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not already quantized, quantize on device 0 and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 144 |
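A hedged usage sketch for the entry point above, following Accelerate's quantization guide; `MyModel` and the checkpoint folder are placeholders, and a CUDA GPU is assumed to be available:

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

with init_empty_weights():
    empty_model = MyModel()  # placeholder: any torch.nn.Module skeleton

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint_folder",  # placeholder folder holding the weights
    device_map="auto",
)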
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""

_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division**: Sets the value to return when there is a zero division. Defaults to `'warn'`.
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}

    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}

    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}

    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""

_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 35 | 0 |
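Usage matches Example 1 from the docstring above (the `datasets` metric API has since been superseded by the `evaluate` library, but this is how this row's code is driven):

import datasets

recall_metric = datasets.load_metric("recall")
results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
print(results)  # {'recall': 0.6666666666666666}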
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 27 |
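The scraper above is just requests + lxml + one XPath expression. The same pattern on an inline HTML string, so it runs without network access:

from lxml import html

doc = html.fromstring('<div class="maincounter-number"><span>42</span></div>')
print(doc.xpath('//div[@class = "maincounter-number"]/span/text()'))  # ['42']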
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """
    A single training/test example for token classification.

    Args:
        guid: Unique id for the example.
        words: list. The words of the sequence.
        labels: (Optional) list. The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
    """

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 35 | 0 |
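A hypothetical driver for the PyTorch dataset above; `NER` stands in for a concrete `TokenClassificationTask` subclass (one that implements the two static readers), and the data directory and label set are placeholders:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
dataset = TokenClassificationDataset(
    token_classification_task=NER(),   # assumed subclass, not defined above
    data_dir="./data/conll2003",       # placeholder path
    tokenizer=tokenizer,
    labels=["O", "B-PER", "I-PER"],    # truncated label set for illustration
    model_type="bert",
    max_seq_length=128,
)
print(len(dataset))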