import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False
):
    """Calculate the intersection and union between one predicted and one ground truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # mask out the ignored index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
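
# Worked example (illustrative only): with pred_label = [[0, 1], [1, 1]],
# label = [[0, 1], [0, 1]] and num_labels = 2, predictions match at three
# pixels (one of class 0, two of class 1), so area_intersect == [1, 2] and
# area_union == area_pred_label + area_label - area_intersect == [2, 3].
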
def total_intersect_and_union(
    results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False
):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False,
):
    """Compute the mean Intersection-over-Union along with per-category and overall accuracy."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self, predictions, references, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc for a central angle in degrees and a radius."""
    return 2 * pi * radius * (angle / 360)
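
# For example, arc_length(90, 10) covers a quarter of the circle:
# (90 / 360) * 2 * pi * 10 = 5 * pi ≈ 15.70796.
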
if __name__ == "__main__":
print(arc_length(90, 10))
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs as a substring of `text` (Rabin-Karp algorithm)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False
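
# Note on the rolling-hash update above: the window is read as a number in base
# `alphabet_size`, so sliding one character to the right subtracts the outgoing
# leading digit (ord(text[i]) * modulus_power), shifts the remainder up one base
# position and adds the incoming character, all modulo `modulus`. Each shift is
# therefore O(1) instead of rehashing the whole window.
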
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker run by each process: repeatedly compare-exchange its value with its neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` using one process per element (parallel odd-even transposition sort)."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
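
# Illustrative call: odd_even_transposition([3, 1, 2]) spawns one process per
# element and returns [1, 2, 3]. Note that the fixed swap count of 10 in
# oe_process assumes inputs of at most 10 elements.
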
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
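
# For example, hamming(10) yields [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]; 7 and 11 are
# skipped because they have prime factors other than 2, 3 and 5.
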
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below `limit` expressible as a prime square plus a prime cube plus a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # sieve the odd primes up to prime_square_limit
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
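
# Sanity check from the Project Euler 87 statement: below fifty there are exactly
# four such numbers (28, 33, 47 and 49), so solution(50) == 4.
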
if __name__ == "__main__":
    print(f"{solution() = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
                 num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
                 max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
                 num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
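
# Minimal usage sketch: RealmConfig() yields the default architecture settings,
# and individual fields can be overridden as keyword arguments, e.g.
# RealmConfig(num_candidates=4, searcher_beam_size=1000).
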
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all of the library's cache directories into the pytest temp dir
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test that get_neighbors_pixel() returns not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasing row for the index of its first negative value."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
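
# Example: find_negative_index([4, 3, 2, -1]) returns 3 (the index of the first
# negative value) and find_negative_index([2, 1]) returns 2 (== len(array),
# since no value is negative).
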
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with a binary search per row, shrinking the search bound as rows descend."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Dict = FLAX_MODEL_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModel)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : List[str] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Tuple = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Dict = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : int = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Dict = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Dict = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Any = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class lowerCamelCase ( _BaseAutoModelClass ):
_lowerCAmelCase : Any = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
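# Hedged usage note (assumption, not part of the original file): with the
# un-obfuscated names, each auto class resolves a checkpoint to the matching
# Flax architecture through the mappings above, e.g.
#   model = FlaxAutoModelForQuestionAnswering.from_pretrained("bert-base-cased")
# would instantiate FlaxBertForQuestionAnswering.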
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase = get_logger(__name__)
class lowerCamelCase :
_lowerCAmelCase : Tuple = '''dummy_data'''
_lowerCAmelCase : int = '''datasets'''
_lowerCAmelCase : int = False
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = True , lowercase__ = None , ):
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[Any] = dataset_name
__UpperCAmelCase : Optional[Any] = cache_dir
__UpperCAmelCase : Union[str, Any] = use_local_dummy_data
__UpperCAmelCase : str = config
# download_callbacks take a single url as input
__UpperCAmelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__UpperCAmelCase : List[str] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__UpperCAmelCase : int = str(lowercase__)
# to be downloaded
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : str = None
@property
def A( self):
if self._dummy_file is None:
__UpperCAmelCase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def A( self):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name)
@property
def A( self):
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''')
def A( self):
__UpperCAmelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__UpperCAmelCase : Any = cached_path(
lowercase__ , cache_dir=self.cache_dir , extract_compressed_file=lowercase__ , force_extract=lowercase__)
return os.path.join(lowercase__ , self.dummy_file_name)
@property
def A( self):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def A( self):
if self._bucket_url is None:
__UpperCAmelCase : Tuple = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/'''))
return self._bucket_url
@property
def A( self):
# return full path if its a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''').split('''/''')[:-1])
def A( self , lowercase__ , *lowercase__):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__UpperCAmelCase : Any = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__UpperCAmelCase : Tuple = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase__ , lowercase__):
return self.create_dummy_data_dict(lowercase__ , lowercase__)
elif isinstance(lowercase__ , (list, tuple)):
return self.create_dummy_data_list(lowercase__ , lowercase__)
else:
return self.create_dummy_data_single(lowercase__ , lowercase__)
def A( self , lowercase__ , *lowercase__):
return self.download_and_extract(lowercase__)
def A( self , lowercase__ , lowercase__):
return self.download_and_extract(lowercase__)
def A( self , lowercase__ , *lowercase__ , **lowercase__):
return path
def A( self):
return {}
def A( self , lowercase__ , lowercase__):
__UpperCAmelCase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowercase__ , lowercase__):
for single_url in single_urls:
download_callback(lowercase__)
else:
__UpperCAmelCase : Union[str, Any] = single_urls
download_callback(lowercase__)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = [os.path.join(lowercase__ , urllib.parse.quote_plus(Path(lowercase__).name)) for x in single_urls]
else:
__UpperCAmelCase : str = single_urls
__UpperCAmelCase : Optional[Any] = os.path.join(lowercase__ , urllib.parse.quote_plus(Path(lowercase__).name))
__UpperCAmelCase : Optional[int] = value
# make sure that values are unique
if all(isinstance(lowercase__ , lowercase__) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
__UpperCAmelCase : Union[str, Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def A( self , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__UpperCAmelCase : List[str] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , lowercase__)) for url in data_url)
__UpperCAmelCase : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''') for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
__UpperCAmelCase : Union[str, Any] = [data_url[0]] * len(lowercase__)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowercase__)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__UpperCAmelCase : Any = os.path.join(lowercase__ , urllib.parse.quote_plus(single_url.split('''/''')[-1]))
dummy_data_list.append(lowercase__)
return dummy_data_list
def A( self , lowercase__ , lowercase__):
for download_callback in self.download_callbacks:
download_callback(lowercase__)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__UpperCAmelCase : Any = os.path.join(lowercase__ , urllib.parse.quote_plus(data_url.split('''/''')[-1]))
if os.path.exists(lowercase__) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A( self):
pass
def A( self):
pass
def A( self , lowercase__):
def _iter_archive_members(lowercase__):
# this preserves the order of the members inside the ZIP archive
__UpperCAmelCase : Dict = Path(self.dummy_file).parent
__UpperCAmelCase : Union[str, Any] = path.relative_to(lowercase__)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
__UpperCAmelCase : int = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(lowercase__)
__UpperCAmelCase : Union[str, Any] = Path(lowercase__)
__UpperCAmelCase : List[Any] = _iter_archive_members(lowercase__) if self.use_local_dummy_data else path.rglob('''*''')
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''')):
yield file_path.relative_to(lowercase__).as_posix(), file_path.open('''rb''')
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = [paths]
for path in paths:
if os.path.isfile(lowercase__):
if os.path.basename(lowercase__).startswith(('''.''', '''__''')):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowercase__):
if os.path.basename(lowercase__).startswith(('''.''', '''__''')):
continue
dirnames.sort()
for filename in sorted(lowercase__):
if filename.startswith(('''.''', '''__''')):
continue
yield os.path.join(lowercase__ , lowercase__)
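# Hedged example (assumption, not in the source): the key-naming rule used by
# the create_dummy_data_* helpers quotes the last path component of each URL:
#   urllib.parse.quote_plus(Path("https://host/data/train.json?rev=2").name)
#   -> "train.json%3Frev%3D2"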
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
        # create hypothetical next token and extend to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and attn_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
        # Define PAD token = EOS token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
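# Hedged note (assumption, not in the original tests): BioGPT is decoder-only,
# so the batched-generation test above left-pads its prompts and reuses the
# EOS token as PAD -- generation then continues from the last real token of
# each prompt rather than from padding.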
| 675 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> None:
'''simple docstring'''
__UpperCAmelCase : Tuple = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), f"{len(SCREAMING_SNAKE_CASE_ )} != {len(SCREAMING_SNAKE_CASE_ )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
lowerCAmelCase = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
lowerCAmelCase = {
    # maps num layers in teacher -> num layers in student -> which teacher layers supervise the student.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
try:
__UpperCAmelCase : List[str] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
f" {n_student}" )
return list(range(SCREAMING_SNAKE_CASE_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(SCREAMING_SNAKE_CASE_ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
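# Worked example from the tables above: distilling a 12-layer teacher into a
# 3-layer student copies teacher layers [0, 6, 11] (LAYERS_TO_COPY[12][3]) and
# supervises the student with teacher layers [3, 7, 11] (LAYERS_TO_SUPERVISE[12][3]).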
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = "student" , lowercase_ = None , lowercase_ = None , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
__UpperCAmelCase : Tuple = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ).save_pretrained(SCREAMING_SNAKE_CASE_ ) # purely for convenience
__UpperCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ ).eval()
else:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), f"teacher must be a model or string got type {type(SCREAMING_SNAKE_CASE_ )}"
__UpperCAmelCase : str = teacher.config.to_diff_dict()
try:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__UpperCAmelCase : str = teacher_e
if d is None:
__UpperCAmelCase : str = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__UpperCAmelCase , __UpperCAmelCase : Dict = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__UpperCAmelCase : Optional[int] = teacher_e
if d is None:
__UpperCAmelCase : int = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(SCREAMING_SNAKE_CASE_ )
# Copy weights
__UpperCAmelCase : List[str] = teacher.config_class(**SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_config(SCREAMING_SNAKE_CASE_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__UpperCAmelCase : Union[str, Any] = student.load_state_dict(teacher.state_dict() , strict=SCREAMING_SNAKE_CASE_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = list(range(SCREAMING_SNAKE_CASE_ ) ), list(range(SCREAMING_SNAKE_CASE_ ) )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
f" {save_path}" )
student.save_pretrained(SCREAMING_SNAKE_CASE_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__UpperCAmelCase : Union[str, Any] = pick_layers_to_copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if d_layers_to_copy is None:
__UpperCAmelCase : Optional[Any] = pick_layers_to_copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
try:
if hasattr(
SCREAMING_SNAKE_CASE_ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , SCREAMING_SNAKE_CASE_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , SCREAMING_SNAKE_CASE_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , SCREAMING_SNAKE_CASE_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , SCREAMING_SNAKE_CASE_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , SCREAMING_SNAKE_CASE_ )
copy_layers(teacher.decoder.block , student.decoder.block , SCREAMING_SNAKE_CASE_ )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
__UpperCAmelCase : Optional[int] = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
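# Hedged illustration (not in the source): for any task other than
# multiple-choice, the property above yields
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...])
# so the batch and sequence lengths stay symbolic axes during ONNX export.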
| 675 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
lowerCAmelCase = ["""names""", """prefix"""]
lowerCAmelCase = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
lowerCAmelCase = ["""encoding_errors""", """on_bad_lines"""]
lowerCAmelCase = ["""date_format"""]
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
_lowerCAmelCase : str = ''','''
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[int] = '''infer'''
_lowerCAmelCase : str = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : str = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : str = True
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[int] = '''.'''
_lowerCAmelCase : str = None
_lowerCAmelCase : Union[str, Any] = '''"'''
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : str = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : int = 1_0_0_0_0
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[int] = '''strict'''
_lowerCAmelCase : List[Any] = '''error'''
_lowerCAmelCase : Optional[Any] = None
def A( self):
if self.delimiter is not None:
__UpperCAmelCase : List[Any] = self.delimiter
if self.column_names is not None:
__UpperCAmelCase : Union[str, Any] = self.column_names
@property
def A( self):
__UpperCAmelCase : Any = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowerCamelCase ( datasets.ArrowBasedBuilder ):
_lowerCAmelCase : List[str] = CsvConfig
def A( self):
return datasets.DatasetInfo(features=self.config.features)
def A( self , lowercase__):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowerCAmelCase__ , (str, list, tuple)):
__UpperCAmelCase : Optional[Any] = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__UpperCAmelCase : Dict = [files]
__UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
__UpperCAmelCase : List[str] = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__UpperCAmelCase : Tuple = [files]
__UpperCAmelCase : List[str] = [dl_manager.iter_files(lowerCAmelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={'''files''': files}))
return splits
def A( self , lowercase__):
if self.config.features is not None:
__UpperCAmelCase : int = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__) for feature in self.config.features.values()):
# cheaper cast
__UpperCAmelCase : Optional[int] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__UpperCAmelCase : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__)
return pa_table
def A( self , lowercase__):
__UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__UpperCAmelCase : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__)):
__UpperCAmelCase : Dict = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(lowerCAmelCase__):
__UpperCAmelCase : int = pa.Table.from_pandas(lowerCAmelCase__)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__)
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(lowerCAmelCase__)}: {e}")
raise
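# Hedged, de-obfuscated sketch (assumption, mirrors the generator above):
# stream a CSV file in chunks with pandas and convert each chunk to Arrow.
def iter_arrow_batches(path, chunksize=10_000):
    # iterator=True + chunksize makes read_csv yield DataFrames lazily
    for batch_idx, df in enumerate(pd.read_csv(path, iterator=True, chunksize=chunksize)):
        yield batch_idx, pa.Table.from_pandas(df)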
| 701 |
from random import shuffle
import tensorflow as tf
from numpy import array
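# NOTE: this script uses TensorFlow 1.x graph-mode APIs (tf.Graph, tf.Session,
# tf.placeholder) and does not run under TensorFlow 2.x eager execution.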
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
        __UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        __UpperCAmelCase : Optional[Any] = tf.global_variables_initializer()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
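# Hedged, framework-free sketch (assumption, not part of the original script):
# the same Expectation-Maximization loop in plain NumPy -- assign each vector
# to its nearest centroid, then move each centroid to the mean of its members.
def kmeans_numpy(vectors, noofclusters, noofiterations=100, seed=0):
    import numpy as np

    rng = np.random.default_rng(seed)
    data = np.asarray(vectors, dtype=float)
    # initialize centroids from randomly chosen data points
    centroids = data[rng.choice(len(data), size=noofclusters, replace=False)]
    for _ in range(noofiterations):
        # Expectation: index of the nearest centroid for every vector
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned vectors
        for k in range(noofclusters):
            members = data[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments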
| 675 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = False
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
lowerCAmelCase = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
lowerCAmelCase = '' if has_file(args.repo_path, """config.json""") else 'unet'
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowerCAmelCase = reader.read()
lowerCAmelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowerCAmelCase = UNetaDModel(**config)
else:
lowerCAmelCase = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
lowerCAmelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase = config[key]
del config[key]
lowerCAmelCase = [k.replace("""UNetRes""", """""") for k in config['down_block_types']]
lowerCAmelCase = [k.replace("""UNetRes""", """""") for k in config['up_block_types']]
if do_only_weights:
lowerCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
lowerCAmelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
lowerCAmelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
lowerCAmelCase = param_value
lowerCAmelCase = True
if not has_changed:
lowerCAmelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
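# Hedged note (assumption, not in the source): the two mapping dicts above
# translate legacy UNet config keys (e.g. "image_size" -> "sample_size") and
# legacy state-dict prefixes (e.g. "downsample_blocks" -> "down_blocks") so an
# old checkpoint loads into the current diffusers UNet classes.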
| 702 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
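# Hedged, de-obfuscated sketch (assumption about the intended names): the
# function above implements the classic maximum-sum-of-non-adjacent-elements
# recurrence, tracking the best sum that includes the current element and the
# best sum that excludes it.
def max_non_adjacent_sum(nums: list[int]) -> int:
    """
    >>> max_non_adjacent_sum([1, 2, 3])
    4
    >>> max_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        # either extend a sum that skipped the previous element, or skip this one
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)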
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@slow
    def test_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''')
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''')
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''')
        image = dataset['''train'''][0]['''image'''].convert('''RGB''')
        inputs = image_processor(image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 1_6))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7], device=torch_device, dtype=torch.float)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 703 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''', model='''hf-internal-testing/tiny-clap-htsat-unfused''')
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output), [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}])
@unittest.skip('''No models are available in TF''')
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''', model='''laion/clap-htsat-unfused''', )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output), [
                {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
            ])
        output = audio_classifier([audio] * 5, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5)
        output = audio_classifier(
            [audio] * 5, candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''], batch_size=5)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5)
@unittest.skip('''No models are available in TF''')
    def test_large_model_tf(self):
pass
| 675 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlnet'''] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
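# Sketch of the effect (illustrative): outside of TYPE_CHECKING, the module
# object is swapped for a `_LazyModule`, so an attribute access such as
# `transformers.models.xlnet.XLNetModel` only triggers the heavy torch-backed
# `modeling_xlnet` import at first use, keeping the top-level import cheap.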
| 704 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(''','''):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 5_0,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1_0_0_0] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
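# Minimal usage sketch (illustrative, not part of the original file; assumes the
# publicly available "facebook/DiT-XL-2-256" checkpoint):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]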
| 675 | 0 |
import os
def largest_product(grid: list[list[int]]) -> int:
    '''Return the greatest product of four adjacent numbers in the grid.'''
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest
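# Example (illustrative, verified by hand on a small 4x4 grid):
#
#   largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#   -> 43680  (the bottom row: 13 * 14 * 15 * 16)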
def solution() -> int:
    '''Read the grid from grid.txt and return the largest product of four adjacent numbers.'''
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''') as file:
        for line in file:
            grid.append(line.strip('''\n''').split(''' '''))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=1_8,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''clusters'''))
        self.assertTrue(hasattr(image_processor, '''do_resize'''))
        self.assertTrue(hasattr(image_processor, '''size'''))
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 1_8, '''width''': 1_8})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2)
        self.assertEqual(image_processor.size, {'''height''': 4_2, '''width''': 4_2})
    def test_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''image_processor.json''')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)
@unittest.skip('''ImageGPT requires clusters at initialization''')
    def test_init_without_params(self):
pass
def prepare_images():
    '''Load two fixture images used by the integration test below.'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''', split='''test''')
    image1 = Image.open(dataset[4]['''file'''])
    image2 = Image.open(dataset[5]['''file'''])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors='''pt''')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1_0_2_4))
        expected_ids = [3_0_6, 1_9_1, 1_9_1]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors='''pt''')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1_0_2_4))
        expected_ids = [3_0_3, 1_3, 1_3]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 675 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = '''marian'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(
        self,
        vocab_size=5_8_1_0_1,
        decoder_vocab_size=None,
        max_position_embeddings=1_0_2_4,
        encoder_layers=1_2,
        encoder_ffn_dim=4_0_9_6,
        encoder_attention_heads=1_6,
        decoder_layers=1_2,
        decoder_ffn_dim=4_0_9_6,
        decoder_attention_heads=1_6,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_0_2_4,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        decoder_start_token_id=5_8_1_0_0,
        scale_embedding=False,
        pad_token_id=5_8_1_0_0,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ])
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F"past_key_values.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"past_key_values.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ])
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F"present.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"present.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
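# Sketch (illustrative, not part of the original file): building the ONNX export
# config and inspecting its declared inputs.
#
#   config = MarianConfig()
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   print(list(onnx_config.inputs.keys()))
#   # ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']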
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    '''Yield (job title, company name) pairs scraped from the Indeed listing page.'''
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    '''Map an image size to the corresponding latent size for the given scale factor.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
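# Example (illustrative, verified by hand): with the default scale factor of 8,
# a 768x768 image maps to a 96x96 latent, i.e.
# downscale_height_and_width(768, 768) == (96, 96).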
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 5_1_2,
        width: int = 5_1_2,
        num_inference_steps: int = 1_0_0,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 675 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''Build the train and eval dataloaders for GLUE MRPC.'''
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
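# Sketch of the decorator's behavior (illustrative, not part of the original
# script): if `inner_training_loop()` raises a CUDA out-of-memory error,
# `find_executable_batch_size` halves the batch size (e.g. 16 -> 8 -> 4 ...)
# and re-runs the wrapped function until a size fits in memory; if the batch
# size reaches zero, the error is re-raised.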
| 708 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 675 | 0 |
'''Compute the Hamming distance between two equal-length strings.'''
def hamming_distance(string1: str, string2: str) -> int:
    '''Return the number of positions at which the two strings differ.'''
    if len(string1) != len(string2):
        raise ValueError('''String lengths must match!''')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
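# Quick checks (illustrative values, verified by hand):
#
#   hamming_distance("python", "pithon")   -> 1
#   hamming_distance("karolin", "kathrin") -> 3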
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = '''sew-d'''
    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        squeeze_factor=2,
        max_position_embeddings=5_1_2,
        position_buckets=2_5_6,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.0_2,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        apply_spec_augment=True,
        mask_time_prob=0.0_5,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
                F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
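# Example (illustrative, verified by hand): the default conv strides multiply
# out to the feature extractor's overall downsampling ratio.
#
#   config = SEWDConfig()
#   print(config.inputs_to_logits_ratio)  # 320  (5 * 2**6)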
| 675 | 0 |
'''Find the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000.'''
def solution() -> str:
    '''Return the last ten digits of the series as a string.'''
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
| 710 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    '''Derive a MobileNetV1Config from the TensorFlow model name.'''
    config = MobileNetV1Config(layer_norm_eps=0.0_0_1)
    if "_quant" in model_name:
        raise ValueError('''Quantized models are not supported.''')
    matches = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''', model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = '''background'''
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''Load the COCO image we will verify the conversion results on.'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''Copy/paste/tweak the TensorFlow checkpoint weights into our MobileNetV1 structure.'''
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'''width''': config.image_size, '''height''': config.image_size}, size={'''shortest_edge''': config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('''Pushing to the hub...''')
        repo_id = '''google/''' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
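# Illustrative CLI invocation (the script filename and paths below are placeholders, not from the original file):
#   python convert_original_tf_checkpoint_to_pytorch.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./mobilenet_v1_dump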
| 675 | 0 |
from __future__ import annotations
def encode( plain ) -> list[int]:
    '''Map each letter of a lowercase string to its 1-based position in the alphabet.'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded ) -> str:
    '''Inverse of `encode`: map 1-based alphabet positions back to letters.'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ) -> None:
    '''Read a string, encode it, and print both the encoded and decoded forms.'''
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
main()
| 711 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig :
    cache_dir : Optional[Union[str, Path]] = None
    force_download : bool = False
    resume_download : bool = False
    local_files_only : bool = False
    proxies : Optional[Dict] = None
    user_agent : Optional[str] = None
    extract_compressed_file : bool = False
    force_extract : bool = False
    delete_extracted : bool = False
    use_etag : bool = True
    num_proc : Optional[int] = None
    max_retries : int = 1
    use_auth_token : Optional[Union[str, bool]] = None
    ignore_url_params : bool = False
    storage_options : Optional[Dict] = None
    download_desc : Optional[str] = None
    def copy( self):
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()})
| 675 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __SCREAMING_SNAKE_CASE ( lowercase_ = "isbn/0140328726" ) -> dict:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
__UpperCAmelCase : Optional[int] = f"{olid} is not a valid Open Library olid"
raise ValueError(__lowerCAmelCase )
return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()
def summarize_book( ol_book_data ) -> dict:
    '''Given Open Library book data, return a summary dict with human-friendly keys.'''
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["""Authors"""] = [
        get_openlibrary_data(author['''key'''] )["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = """, """.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCAmelCase = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
lowerCAmelCase = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
print("""\n""".join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 712 |
def binary_or( a , b ) -> str:
    '''Return the bitwise OR of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
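# Example (illustrative): binary_or(25, 32) returns "0b111001".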
| 675 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names : List[str] = ['input_values', 'padding_mask']
    def __init__( self , feature_size = 1 , sampling_rate = 2_4_0_0_0 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride( self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length))
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''')
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple)) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list))))
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray):
            raw_audio = np.asarray(raw_audio , dtype=np.float32)
        elif isinstance(raw_audio , np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
# verify inputs are valid
for idx, example in enumerate(lowercase__):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}")
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels")
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({'''input_values''': raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = '''max_length'''
            else:
                padded_inputs = input_values
# normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
        if padding:
            padded_inputs['''padding_mask'''] = padded_inputs.pop('''attention_mask''')
        input_values = []
        for example in padded_inputs.pop('''input_values'''):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs['''input_values'''] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
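# Usage sketch (illustrative values, not from the original file):
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
#   inputs = fe(np.zeros(24_000, dtype=np.float32), sampling_rate=24_000, return_tensors="np")
#   # inputs["input_values"] has shape (batch, channels, padded_length)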
| 713 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key( message , key ) -> str:
    '''Extend `key` by cycling its characters until it is as long as `message`.'''
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text( message , key_new ) -> str:
    '''Encrypt `message` using the extended key.'''
    cipher_text = ''''''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text
def original_text( cipher_text , key_new ) -> str:
    '''Decrypt `cipher_text` back to the original message.'''
    or_txt = ''''''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main( ) -> None:
    '''Demonstrate an encrypt/decrypt round trip on a sample message.'''
    message = '''THE GERMAN ATTACK'''
    key = '''SECRET'''
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(f"Encrypted Text = {s}" )
    print(f"Original Text = {original_text(s , key_new )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
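# Worked example (illustrative): generate_key("THE GERMAN ATTACK", "SECRET") extends the
# key to "SECRETSECRETSECRE", and original_text(cipher_text(...)) restores the message.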
| 675 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester( unittest.TestCase ):
def A( self):
self.assertDictEqual(MockClass().to_kwargs() , {})
self.assertDictEqual(MockClass(a=2).to_kwargs() , {'''a''': 2})
        self.assertDictEqual(MockClass(a=2 , b=True).to_kwargs() , {'''a''': 2, '''b''': True})
self.assertDictEqual(MockClass(a=2 , c=2.2_5).to_kwargs() , {'''a''': 2, '''c''': 2.2_5})
@require_cuda
def A( self):
__UpperCAmelCase : Any = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2)
AcceleratorState._reset_state()
__UpperCAmelCase : Union[str, Any] = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler])
print(accelerator.use_fpaa)
__UpperCAmelCase : Dict = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0)
self.assertEqual(scaler._growth_factor , 2.0)
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5)
self.assertEqual(scaler._growth_interval , 2_0_0_0)
        self.assertEqual(scaler._enabled , True)
@require_multi_gpu
def A( self):
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd , env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = """"""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 714 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    '''Calculate the intersection and union areas between a prediction and a ground truth map.'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    '''Accumulate intersection and union areas over all (prediction, ground truth) pairs.'''
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    '''Compute mean IoU, mean accuracy, overall accuracy, and per-category metrics.'''
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['''mean_iou'''] = np.nanmean(iou )
    metrics['''mean_accuracy'''] = np.nanmean(acc )
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU( datasets.Metric ):
    def _info( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 675 | 0 |
import os
import sys
lowerCAmelCase = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
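# Usage sketch (illustrative; these entry points follow the torch.hub convention):
#   import torch
#   tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")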
| 715 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp( pattern , text ) -> bool:
    '''Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes.'''
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp( ) -> None:
    '''Exercise rabin_karp on a handful of match and no-match cases.'''
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
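# Note: the rolling hash makes the average running time O(len(text) + len(pattern));
# the worst case degrades toward O(len(text) * len(pattern)) under heavy hash collisions.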
| 675 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ) -> dict:
    '''Load a fairseq/metaseq OPT checkpoint and remap its keys to the HF layout.'''
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='''cpu''' )['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ) -> None:
    '''Convert an OPT checkpoint and save it as a HF PyTorch model.'''
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
lowerCAmelCase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 716 |
def hamming( n_element ) -> list:
    '''Return the first `n_element` Hamming numbers (numbers of the form 2^i * 3^j * 5^k).'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('''a should be a positive number''' )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig( PretrainedConfig ):
    model_type = '''git_vision_model'''
    def __init__( self , hidden_size=7_6_8 , intermediate_size=3_0_7_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_channels=3 , image_size=2_2_4 , patch_size=1_6 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''') == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class GitConfig( PretrainedConfig ):
    model_type = '''git'''
    def __init__( self , vision_config=None , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=6 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_0_1 , eos_token_id=1_0_2 , num_image_with_embedding=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self):
        output = copy.deepcopy(self.__dict__)
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
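# Usage sketch (illustrative):
#   config = GitConfig()                # default text decoder + vision tower settings
#   config.vision_config.hidden_size    # 768 by default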
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    model_type = '''realm'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , retriever_proj_size=1_2_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_candidates=8 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , span_hidden_size=2_5_6 , max_span_width=1_0 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=3_2_0 , num_block_records=1_3_3_5_3_7_1_8 , searcher_beam_size=5_0_0_0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 675 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    '''Solve Ax = b iteratively with the Jacobi method, starting from `init_val`.'''
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )
    table : NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table ) -> bool:
    '''Raise if the augmented coefficient matrix is not strictly diagonally dominant.'''
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
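# Example (illustrative, strictly diagonally dominant system):
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)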
| 718 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=True , scope='''session''' )
def disable_tqdm_output( ) -> None:
    '''simple docstring'''
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ) -> None:
    '''simple docstring'''
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ) -> None:
    '''simple docstring'''
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
| 675 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained(_A , **_A )
__UpperCAmelCase : str = AutoModelForSeqaSeqLM.from_config(_A )
model.save_pretrained(_A )
AutoTokenizer.from_pretrained(_A ).save_pretrained(_A )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 719 |
def generate_large_matrix( ) -> list[list[int]]:
    '''Generate a 1000x1000 grid sorted in decreasing order along rows and columns.'''
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid ) -> None:
    '''Validate that the rows and columns of the grid are sorted in decreasing order.'''
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ) -> int:
    '''Find the index of the first negative number in a decreasingly sorted array.'''
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid ) -> int:
    '''Count negatives via binary search on each row, shrinking the search bound as we go.'''
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ) -> int:
    '''Count negatives by scanning every element of the grid.'''
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ) -> int:
    '''Count negatives row by row, breaking at the first negative in each row.'''
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark( ) -> None:
    '''Benchmark the three counting implementations on the large grid.'''
    from timeit import timeit
    print('''Running benchmarks''' )
    setup = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 0 |
from __future__ import annotations
lowerCAmelCase = """#"""
class lowerCamelCase :
def __init__( self):
__UpperCAmelCase : List[str] = {}
def A( self , lowercase__):
__UpperCAmelCase : Tuple = self._trie
for char in text:
if char not in trie:
__UpperCAmelCase : str = {}
__UpperCAmelCase : Union[str, Any] = trie[char]
__UpperCAmelCase : str = True
def A( self , lowercase__):
__UpperCAmelCase : int = self._trie
for char in prefix:
if char in trie:
__UpperCAmelCase : Any = trie[char]
else:
return []
return self._elements(snake_case_)
def A( self , lowercase__):
__UpperCAmelCase : str = []
for c, v in d.items():
__UpperCAmelCase : Tuple = [''' '''] if c == END else [(c + s) for s in self._elements(snake_case_)]
result.extend(snake_case_)
return tuple(snake_case_)
lowerCAmelCase = Trie()
lowerCAmelCase = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie( string ) -> tuple:
    '''Return all words in the trie that extend the given prefix.'''
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main( ) -> None:
    '''Print autocompletions for the prefix "de".'''
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
        model = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''').input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''').input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 721 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token so inputs can be left-padded for batching
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = 0 , lowercase_ = 0 ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = right or len(snake_case__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(snake_case__ , snake_case__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
| 675 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> None:
'''simple docstring'''
__UpperCAmelCase : List[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ), f"{len(__SCREAMING_SNAKE_CASE )} != {len(__SCREAMING_SNAKE_CASE )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
lowerCAmelCase = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
lowerCAmelCase = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
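# Illustrative sanity checks (values read off the tables above): a 12-layer
# teacher distilled into a 3-layer student copies teacher layers 0, 6 and 11,
# and its student layers are supervised against teacher layers 3, 7 and 11.
assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11]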
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
try:
__UpperCAmelCase : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
f" {n_student}" )
return list(range(__SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(__SCREAMING_SNAKE_CASE ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = "student" , lowercase_ = None , lowercase_ = None , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
__UpperCAmelCase : Dict = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ).save_pretrained(__SCREAMING_SNAKE_CASE ) # purely for convenience
__UpperCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__SCREAMING_SNAKE_CASE ).eval()
else:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), f"teacher must be a model or string got type {type(__SCREAMING_SNAKE_CASE )}"
__UpperCAmelCase : Optional[int] = teacher.config.to_diff_dict()
try:
__UpperCAmelCase , __UpperCAmelCase : List[str] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__UpperCAmelCase : str = teacher_e
if d is None:
__UpperCAmelCase : int = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__UpperCAmelCase , __UpperCAmelCase : Tuple = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__UpperCAmelCase : Optional[int] = teacher_e
if d is None:
__UpperCAmelCase : Union[str, Any] = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__SCREAMING_SNAKE_CASE )
# Copy weights
__UpperCAmelCase : List[Any] = teacher.config_class(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase : str = AutoModelForSeqaSeqLM.from_config(__SCREAMING_SNAKE_CASE )
# Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
__UpperCAmelCase : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=__SCREAMING_SNAKE_CASE )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__UpperCAmelCase , __UpperCAmelCase : Tuple = list(range(__SCREAMING_SNAKE_CASE ) ), list(range(__SCREAMING_SNAKE_CASE ) )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
f" {save_path}" )
student.save_pretrained(__SCREAMING_SNAKE_CASE )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__UpperCAmelCase : Optional[int] = pick_layers_to_copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if d_layers_to_copy is None:
__UpperCAmelCase : str = pick_layers_to_copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
try:
if hasattr(
__SCREAMING_SNAKE_CASE , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __SCREAMING_SNAKE_CASE )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __SCREAMING_SNAKE_CASE )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __SCREAMING_SNAKE_CASE )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __SCREAMING_SNAKE_CASE )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __SCREAMING_SNAKE_CASE )
copy_layers(teacher.decoder.block , student.decoder.block , __SCREAMING_SNAKE_CASE )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
__UpperCAmelCase : str = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__SCREAMING_SNAKE_CASE )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
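# Hypothetical CLI sketch (script and checkpoint names are illustrative, not
# taken from this repo): fire exposes the function's arguments directly, e.g.
#   python make_student.py <teacher_checkpoint> --save_path ./student --e 6 --d 3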
| 701 |
from random import shuffle
import tensorflow as tf
from numpy import array
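# NOTE: this module uses TF1-style graph/session APIs (tf.Graph, tf.Session,
# tf.placeholder); running it under TensorFlow 2.x would require the
# tf.compat.v1 shims with eager execution disabled.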
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now let's construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
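# Minimal usage sketch (illustrative data): clustering four 2-D points into
# two groups should separate the low pair from the high pair.
# centroids, assignments = __SCREAMING_SNAKE_CASE([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]], 2)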
| 675 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowerCamelCase :
def __init__( self , lowercase__ = "cpu" , lowercase__ = "openai/clip-vit-large-patch14"):
__UpperCAmelCase : Union[str, Any] = device
__UpperCAmelCase : Optional[Any] = CLIPTokenizerFast.from_pretrained(lowercase__)
__UpperCAmelCase : str = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__UpperCAmelCase : Any = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__UpperCAmelCase : List[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std)
__UpperCAmelCase : Tuple = torchvision.transforms.Resize(2_2_4)
__UpperCAmelCase : Union[str, Any] = torchvision.transforms.CenterCrop(2_2_4)
def A( self , lowercase__):
__UpperCAmelCase : Any = self.resize(lowercase__)
__UpperCAmelCase : Optional[int] = self.center_crop(lowercase__)
__UpperCAmelCase : List[str] = self.normalize(lowercase__)
return images
def __call__( self , lowercase__=None , lowercase__=None , **lowercase__):
__UpperCAmelCase : Optional[int] = self.tokenizer(text=lowercase__ , **lowercase__)
__UpperCAmelCase : List[str] = self.preprocess_img(lowercase__)
__UpperCAmelCase : List[Any] = {key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
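# NOTE: preprocessing is done with differentiable torchvision ops instead of a
# PIL-based feature extractor so that gradients can flow back to the image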
class lowerCamelCase ( nn.Module ):
def __init__( self , lowercase__=1_0 , lowercase__=0.0_1 , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=True , lowercase__="image" , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , ):
super().__init__()
__UpperCAmelCase : Dict = None
__UpperCAmelCase : int = device if device else get_device()
if vqgan:
__UpperCAmelCase : Optional[Any] = vqgan
else:
__UpperCAmelCase : Union[str, Any] = load_vqgan(self.device , conf_path=lowercase__ , ckpt_path=lowercase__)
self.vqgan.eval()
if clip:
__UpperCAmelCase : Any = clip
else:
__UpperCAmelCase : Optional[int] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''')
self.clip.to(self.device)
__UpperCAmelCase : Any = ProcessorGradientFlow(device=self.device)
__UpperCAmelCase : Tuple = iterations
__UpperCAmelCase : List[Any] = lr
__UpperCAmelCase : Optional[int] = log
__UpperCAmelCase : Any = make_grid
__UpperCAmelCase : Tuple = return_val
__UpperCAmelCase : Union[str, Any] = quantize
__UpperCAmelCase : int = self.vqgan.decoder.z_shape
def A( self , lowercase__=None , lowercase__=None , lowercase__=5 , lowercase__=True):
__UpperCAmelCase : Optional[Any] = []
if output_path is None:
__UpperCAmelCase : List[str] = '''./animation.gif'''
if input_path is None:
__UpperCAmelCase : Union[str, Any] = self.save_path
__UpperCAmelCase : Any = sorted(glob(input_path + '''/*'''))
if not len(lowercase__):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''')
if len(lowercase__) == 1:
print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''')
__UpperCAmelCase : str = total_duration / len(lowercase__)
__UpperCAmelCase : Any = [frame_duration] * len(lowercase__)
if extend_frames:
__UpperCAmelCase : Optional[int] = 1.5
__UpperCAmelCase : Dict = 3
for file_name in paths:
if file_name.endswith('''.png'''):
images.append(imageio.imread(lowercase__))
imageio.mimsave(lowercase__ , lowercase__ , duration=lowercase__)
print(F"gif saved to {output_path}")
def A( self , lowercase__=None , lowercase__=None):
if not (path or img):
raise ValueError('''Input either path or tensor''')
if img is not None:
raise NotImplementedError
__UpperCAmelCase : Union[str, Any] = preprocess(Image.open(lowercase__) , target_image_size=2_5_6).to(self.device)
__UpperCAmelCase : Dict = preprocess_vqgan(lowercase__)
__UpperCAmelCase , *__UpperCAmelCase : List[Any] = self.vqgan.encode(lowercase__)
return z
def A( self , lowercase__):
__UpperCAmelCase : Dict = self.latent.detach().requires_grad_()
__UpperCAmelCase : str = base_latent + transform_vector
if self.quantize:
__UpperCAmelCase , *__UpperCAmelCase : int = self.vqgan.quantize(lowercase__)
else:
__UpperCAmelCase : List[Any] = trans_latent
return self.vqgan.decode(lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__=None):
__UpperCAmelCase : List[Any] = self.clip_preprocessor(text=lowercase__ , images=lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : int = self.clip(**lowercase__)
__UpperCAmelCase : Optional[int] = clip_outputs.logits_per_image
if weights is not None:
__UpperCAmelCase : Any = similarity_logits * weights
return similarity_logits.sum()
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : str = self._get_clip_similarity(pos_prompts['''prompts'''] , lowercase__ , weights=(1 / pos_prompts['''weights''']))
if neg_prompts:
__UpperCAmelCase : Tuple = self._get_clip_similarity(neg_prompts['''prompts'''] , lowercase__ , weights=neg_prompts['''weights'''])
else:
__UpperCAmelCase : List[str] = torch.tensor([1] , device=self.device)
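# minimizing this loss raises similarity to the positive prompts while
# lowering similarity to the negative ones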
__UpperCAmelCase : Tuple = -torch.log(lowercase__) + torch.log(lowercase__)
return loss
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = torch.randn_like(self.latent , requires_grad=lowercase__ , device=self.device)
__UpperCAmelCase : List[str] = torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
__UpperCAmelCase : Tuple = self._add_vector(lowercase__)
__UpperCAmelCase : Optional[int] = loop_post_process(lowercase__)
__UpperCAmelCase : Union[str, Any] = self._get_CLIP_loss(lowercase__ , lowercase__ , lowercase__)
print('''CLIP loss''' , lowercase__)
if self.log:
wandb.log({'''CLIP Loss''': clip_loss})
clip_loss.backward(retain_graph=lowercase__)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def A( self , lowercase__ , lowercase__ , lowercase__):
wandb.init(reinit=lowercase__ , project='''face-editor''')
wandb.config.update({'''Positive Prompts''': positive_prompts})
wandb.config.update({'''Negative Prompts''': negative_prompts})
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations})
if image_path:
__UpperCAmelCase : Any = Image.open(lowercase__)
__UpperCAmelCase : str = image.resize((2_5_6, 2_5_6))
wandb.log({'''Original Image''': wandb.Image(lowercase__)})
def A( self , lowercase__):
if not prompts:
return []
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Any = []
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = [prompt.strip() for prompt in prompts.split('''|''')]
for prompt in prompts:
if isinstance(lowercase__ , (tuple, list)):
__UpperCAmelCase : Any = prompt[0]
__UpperCAmelCase : List[str] = float(prompt[1])
elif ":" in prompt:
__UpperCAmelCase , __UpperCAmelCase : int = prompt.split(''':''')
__UpperCAmelCase : List[str] = float(lowercase__)
else:
__UpperCAmelCase : Union[str, Any] = prompt
__UpperCAmelCase : Dict = 1.0
processed_prompts.append(lowercase__)
weights.append(lowercase__)
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase__ , device=self.device),
}
def A( self , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=True , lowercase__=None , ):
if image_path:
__UpperCAmelCase : Union[str, Any] = self._get_latent(lowercase__)
else:
__UpperCAmelCase : Any = torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(lowercase__ , lowercase__ , lowercase__)
assert pos_prompts, "You must provide at least one positive prompt."
__UpperCAmelCase : List[Any] = self.process_prompts(lowercase__)
__UpperCAmelCase : Any = self.process_prompts(lowercase__)
if save_final and save_path is None:
__UpperCAmelCase : Optional[int] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts''']))
if not os.path.exists(lowercase__):
os.makedirs(lowercase__)
else:
__UpperCAmelCase : Optional[int] = save_path + '''_''' + get_timestamp()
os.makedirs(lowercase__)
__UpperCAmelCase : Tuple = save_path
__UpperCAmelCase : str = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print('''Original Image''')
show_pil(custom_to_pil(lowercase__))
__UpperCAmelCase : List[str] = loop_post_process(lowercase__)
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase__ , lowercase__ , lowercase__)):
if show_intermediate:
show_pil(lowercase__)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png"))
if self.log:
wandb.log({'''Image''': wandb.Image(lowercase__)})
if show_final:
show_pil(lowercase__)
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png"))
| 702 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
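# Worked example (illustrative): for [3, 2, 7, 10] the include/exclude
# recurrence above picks the non-adjacent elements 3 and 10, returning 13.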
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 0 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCamelCase ( UpperCamelCase_ ):
def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(
features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
__UpperCAmelCase : Tuple = Generator(
cache_dir=__A , features=__A , generator=__A , gen_kwargs=__A , **__A , )
def A( self):
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : Optional[int] = self.builder.as_streaming_dataset(split='''train''')
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
__UpperCAmelCase : Optional[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=__A , in_memory=self.keep_in_memory)
return dataset
| 703 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 675 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __SCREAMING_SNAKE_CASE ( lowercase_="" ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = tempfile.mkdtemp()
return os.path.join(_lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.floataa) - 0.5
__UpperCAmelCase : Union[str, Any] = AgentAudio(lowercase__)
__UpperCAmelCase : Any = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase__ , agent_type.to_raw() , atol=1e-4))
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase__))
# Ensure that the file contains the same value as the original tensor
__UpperCAmelCase : Any = sf.read(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , torch.tensor(lowercase__) , atol=1e-4))
def A( self):
__UpperCAmelCase : str = torch.rand(1_2 , dtype=torch.floataa) - 0.5
__UpperCAmelCase : int = get_new_path(suffix='''.wav''')
sf.write(lowercase__ , lowercase__ , 1_6_0_0_0)
__UpperCAmelCase : Tuple = AgentAudio(lowercase__)
self.assertTrue(torch.allclose(lowercase__ , agent_type.to_raw() , atol=1e-4))
self.assertEqual(agent_type.to_string() , lowercase__)
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Union[str, Any] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3))
__UpperCAmelCase : List[str] = AgentImage(lowercase__)
__UpperCAmelCase : int = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase__ , agent_type._tensor , atol=1e-4))
self.assertIsInstance(agent_type.to_raw() , Image.Image)
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase__))
def A( self):
__UpperCAmelCase : Optional[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / "000000039769.png"
__UpperCAmelCase : str = Image.open(lowercase__)
__UpperCAmelCase : List[str] = AgentImage(lowercase__)
self.assertTrue(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase__))
def A( self):
__UpperCAmelCase : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / "000000039769.png"
__UpperCAmelCase : Optional[int] = Image.open(lowercase__)
__UpperCAmelCase : Any = AgentImage(lowercase__)
self.assertFalse(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase__))
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Any = "Hey!"
__UpperCAmelCase : List[str] = AgentText(lowercase__)
self.assertEqual(lowercase__ , agent_type.to_string())
self.assertEqual(lowercase__ , agent_type.to_raw())
self.assertEqual(lowercase__ , lowercase__)
| 704 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
# create an ImageNet label -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
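# class id 1000 (one past the 1000 ImageNet classes) serves as the learned
# "null" label used for classifier-free guidance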
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[int] = 2
while i * i <= n:
__UpperCAmelCase : List[str] = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
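# Worked example for the divisor count above: 28 = 2**2 * 7, so the loop
# computes (2 + 1) * (1 + 1) = 6 -- the divisors 1, 2, 4, 7, 14 and 28.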
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
return next(i for i in triangle_number_generator() if count_divisors(lowercase_ ) > 500 )
if __name__ == "__main__":
print(solution())
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
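# openai/imagegpt-small quantizes each pixel against its color-cluster palette,
# so a 32x32 input becomes a sequence of 32 * 32 = 1024 cluster ids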
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 675 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( __a ):
def __init__( self , lowercase__):
super().__init__()
__UpperCAmelCase : Tuple = nn.ModuleList(a_)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(a_ , a_ , self.nets)):
__UpperCAmelCase : List[str] = controlnet(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , )
# merge samples
if i == 0:
__UpperCAmelCase : List[Any] = down_samples, mid_sample
else:
__UpperCAmelCase : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a_ , a_)
]
mid_block_res_sample += mid_sample
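# residual outputs from each ControlNet are summed elementwise, which is how
# multiple conditionings are combined before reaching the UNet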
return down_block_res_samples, mid_block_res_sample
def A( self , lowercase__ , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , ):
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a_ , is_main_process=a_ , save_function=a_ , safe_serialization=a_ , variant=a_ , )
idx += 1
__UpperCAmelCase : Tuple = model_path_to_save + F"_{idx}"
@classmethod
def A( cls , lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__UpperCAmelCase : int = pretrained_model_path
while os.path.isdir(a_):
__UpperCAmelCase : List[str] = ControlNetModel.from_pretrained(a_ , **a_)
controlnets.append(a_)
idx += 1
__UpperCAmelCase : str = pretrained_model_path + F"_{idx}"
logger.info(F"{len(a_)} controlnets loaded from {pretrained_model_path}.")
if len(a_) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(a_)}. Expected at least {pretrained_model_path + '_0'}.")
return cls(a_)
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# Each matching div contains the details of one (organic, i.e. non-sponsored) job listing
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
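# NOTE: the selectors above ("organicJob", "jobTitle", "company") are tied to
# Indeed's historical HTML layout and may stop matching if the markup changes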
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 0 |
from __future__ import annotations
from typing import Any
class lowerCamelCase :
def __init__( self , lowercase__ = 6):
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : str = None
self.create_linked_list(__lowerCAmelCase)
def A( self , lowercase__):
__UpperCAmelCase : Tuple = Node()
__UpperCAmelCase : Optional[int] = current_node
__UpperCAmelCase : Dict = current_node
__UpperCAmelCase : str = current_node
for _ in range(1 , __lowerCAmelCase):
__UpperCAmelCase : Optional[int] = Node()
__UpperCAmelCase : str = current_node
__UpperCAmelCase : Any = previous_node
__UpperCAmelCase : Any = current_node
__UpperCAmelCase : Dict = self.front
__UpperCAmelCase : str = previous_node
def A( self):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A( self):
self.check_can_perform_operation()
return self.front.data if self.front else None
def A( self , lowercase__):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
__UpperCAmelCase : List[str] = self.rear.next
if self.rear:
__UpperCAmelCase : Tuple = data
def A( self):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
__UpperCAmelCase : List[str] = self.front.data
__UpperCAmelCase : str = None
return data
__UpperCAmelCase : Dict = self.front
__UpperCAmelCase : Optional[Any] = old_front.next
__UpperCAmelCase : Tuple = old_front.data
__UpperCAmelCase : Optional[Any] = None
return data
def A( self):
if self.is_empty():
raise Exception('''Empty Queue''')
def A( self):
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''')
class lowerCamelCase :
def __init__( self):
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if __name__ == "__main__":
import doctest
doctest.testmod()
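# Rough behavioural analogue of the ring buffer above (my own sketch, not part
# of the source; note that deque(maxlen=...) silently drops the oldest item
# when full, whereas the class above raises "Full Queue"):
#
#   from collections import deque
#   q = deque(maxlen=6)           # fixed capacity, like the 6-node default
#   q.append("A"); q.append("B")  # enqueue
#   q.popleft()                   # dequeue -> "A"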
| 707 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
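# Quick sanity check (my own numbers, not from the source): with the default
# scale_factor of 8 the helper returns height // 64 rounded up to the next
# multiple, times 8, i.e. the latent grid size. For example
# downscale_height_and_width(768, 768) -> (96, 96), while the non-multiple
# downscale_height_and_width(770, 770) rounds up to (104, 104).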
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowerCamelCase ( _UpperCAmelCase ):
_lowerCAmelCase : Tuple = '''glpn'''
def __init__( self , lowercase__=3 , lowercase__=4 , lowercase__=[2, 2, 2, 2] , lowercase__=[8, 4, 2, 1] , lowercase__=[3_2, 6_4, 1_6_0, 2_5_6] , lowercase__=[7, 3, 3, 3] , lowercase__=[4, 2, 2, 2] , lowercase__=[1, 2, 5, 8] , lowercase__=[4, 4, 4, 4] , lowercase__="gelu" , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.0_2 , lowercase__=0.1 , lowercase__=1e-6 , lowercase__=6_4 , lowercase__=1_0 , lowercase__=-1 , **lowercase__ , ):
        super().__init__(**lowercase__)
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : int = num_encoder_blocks
__UpperCAmelCase : Union[str, Any] = depths
__UpperCAmelCase : List[Any] = sr_ratios
__UpperCAmelCase : List[Any] = hidden_sizes
__UpperCAmelCase : Any = patch_sizes
__UpperCAmelCase : Union[str, Any] = strides
__UpperCAmelCase : List[Any] = mlp_ratios
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Tuple = drop_path_rate
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : List[Any] = decoder_hidden_size
__UpperCAmelCase : Union[str, Any] = max_depth
__UpperCAmelCase : Dict = head_in_index
| 708 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 0 |
'''simple docstring'''
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval ( s ) -> int:
    '''simple docstring'''
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution ( n = N ) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
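# Cross-check sketch (my own addition, not part of the source): the
# sliding-window scan above can be validated against a plain scan over every
# 13-digit window.
def _brute_force_solution ( n = N ) -> int:
    best = 0
    for i in range(len(n ) - 12):
        best = max(best , str_eval(n[i : i + 13] ))
    return best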
if __name__ == "__main__":
print(F'{solution() = }')
| 709 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
                F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                F" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
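# The property above multiplies all convolutional strides together, giving the
# input-samples-per-output-frame ratio. With the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 = 320, i.e. one
# output frame per 320 input samples (20 ms at the 16 kHz rate usual for this
# model family). A minimal standalone sketch of the same reduction:
#
#   import functools, operator
#   functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)  # -> 320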
| 675 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCamelCase :
def __init__( self , lowercase__ = None):
if components is None:
__UpperCAmelCase : str = []
__UpperCAmelCase : List[str] = list(__lowerCAmelCase)
def __len__( self):
return len(self.__components)
def __str__( self):
return "(" + ",".join(map(__lowerCAmelCase , self.__components)) + ")"
def __add__( self , lowercase__):
__UpperCAmelCase : List[Any] = len(self)
if size == len(__lowerCAmelCase):
__UpperCAmelCase : List[Any] = [self.__components[i] + other.component(__lowerCAmelCase) for i in range(__lowerCAmelCase)]
return Vector(__lowerCAmelCase)
else:
raise Exception('''must have the same size''')
def __sub__( self , lowercase__):
__UpperCAmelCase : Union[str, Any] = len(self)
if size == len(__lowerCAmelCase):
__UpperCAmelCase : str = [self.__components[i] - other.component(__lowerCAmelCase) for i in range(__lowerCAmelCase)]
return Vector(__lowerCAmelCase)
else: # error case
raise Exception('''must have the same size''')
@overload
def __mul__( self , lowercase__):
...
@overload
def __mul__( self , lowercase__):
...
def __mul__( self , lowercase__):
if isinstance(__lowerCAmelCase , (float, int)):
__UpperCAmelCase : int = [c * other for c in self.__components]
return Vector(__lowerCAmelCase)
elif isinstance(__lowerCAmelCase , __lowerCAmelCase) and len(self) == len(__lowerCAmelCase):
__UpperCAmelCase : Dict = len(self)
__UpperCAmelCase : Optional[int] = [self.__components[i] * other.component(__lowerCAmelCase) for i in range(__lowerCAmelCase)]
return sum(__lowerCAmelCase)
else: # error case
raise Exception('''invalid operand!''')
def A( self):
return Vector(self.__components)
def A( self , lowercase__):
if isinstance(__lowerCAmelCase , __lowerCAmelCase) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception('''index out of range''')
def A( self , lowercase__ , lowercase__):
assert -len(self.__components) <= pos < len(self.__components)
__UpperCAmelCase : Dict = value
def A( self):
if len(self.__components) == 0:
raise Exception('''Vector is empty''')
__UpperCAmelCase : str = [c**2 for c in self.__components]
return math.sqrt(sum(__lowerCAmelCase))
def A( self , lowercase__ , lowercase__ = False):
__UpperCAmelCase : List[Any] = self * other
__UpperCAmelCase : List[str] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
return Vector([0] * dimension )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ))
__UpperCAmelCase : str = [0] * dimension
__UpperCAmelCase : Union[str, Any] = 1
return Vector(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (isinstance(UpperCAmelCase__ , (int, float) ))
)
return x * scalar + y
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
random.seed(UpperCAmelCase__ )
__UpperCAmelCase : Tuple = [random.randint(UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )]
return Vector(UpperCAmelCase__ )
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = matrix
__UpperCAmelCase : str = w
__UpperCAmelCase : Any = h
def __str__( self):
__UpperCAmelCase : Tuple = ''''''
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self , lowercase__):
if self.__width == other.width() and self.__height == other.height():
__UpperCAmelCase : Optional[int] = []
for i in range(self.__height):
__UpperCAmelCase : List[str] = [
self.__matrix[i][j] + other.component(__lowerCAmelCase , __lowerCAmelCase)
for j in range(self.__width)
]
matrix.append(__lowerCAmelCase)
return Matrix(__lowerCAmelCase , self.__width , self.__height)
else:
raise Exception('''matrix must have the same dimension!''')
def __sub__( self , lowercase__):
if self.__width == other.width() and self.__height == other.height():
__UpperCAmelCase : Optional[Any] = []
for i in range(self.__height):
__UpperCAmelCase : Dict = [
self.__matrix[i][j] - other.component(__lowerCAmelCase , __lowerCAmelCase)
for j in range(self.__width)
]
matrix.append(__lowerCAmelCase)
return Matrix(__lowerCAmelCase , self.__width , self.__height)
else:
raise Exception('''matrices must have the same dimension!''')
@overload
def __mul__( self , lowercase__):
...
@overload
def __mul__( self , lowercase__):
...
def __mul__( self , lowercase__):
if isinstance(__lowerCAmelCase , __lowerCAmelCase): # matrix-vector
if len(__lowerCAmelCase) == self.__width:
__UpperCAmelCase : Optional[Any] = zero_vector(self.__height)
for i in range(self.__height):
__UpperCAmelCase : int = [
self.__matrix[i][j] * other.component(__lowerCAmelCase)
for j in range(self.__width)
]
ans.change_component(__lowerCAmelCase , sum(__lowerCAmelCase))
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''')
elif isinstance(__lowerCAmelCase , (int, float)): # matrix-scalar
__UpperCAmelCase : Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(__lowerCAmelCase , self.__width , self.__height)
return None
def A( self):
return self.__height
def A( self):
return self.__width
def A( self , lowercase__ , lowercase__):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''')
def A( self , lowercase__ , lowercase__ , lowercase__):
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCAmelCase : Union[str, Any] = value
else:
raise Exception('''change_component: indices out of bounds''')
def A( self , lowercase__ , lowercase__):
if self.__height != self.__width:
raise Exception('''Matrix is not square''')
__UpperCAmelCase : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__lowerCAmelCase)):
__UpperCAmelCase : Union[str, Any] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__lowerCAmelCase , self.__width - 1 , self.__height - 1).determinant()
def A( self , lowercase__ , lowercase__):
if self.__height != self.__width:
raise Exception('''Matrix is not square''')
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__lowerCAmelCase , __lowerCAmelCase)
else:
raise Exception('''Indices out of bounds''')
def A( self):
if self.__height != self.__width:
raise Exception('''Matrix is not square''')
if self.__height < 1:
raise Exception('''Matrix has no element''')
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCAmelCase : Any = [
self.__matrix[0][y] * self.cofactor(0 , __lowerCAmelCase) for y in range(self.__width)
]
return sum(__lowerCAmelCase)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Any = [[0] * n for _ in range(UpperCAmelCase__ )]
return Matrix(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
random.seed(UpperCAmelCase__ )
__UpperCAmelCase : Optional[int] = [
[random.randint(UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )
]
return Matrix(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
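# Standalone cross-check of the cofactor-expansion determinant above (my own
# sketch on plain nested lists, not the classes above):
def _det ( m ):
    # Laplace expansion along the first row, mirroring determinant() above.
    if len(m ) == 1:
        return m[0][0]
    total = 0
    for y in range(len(m )):
        minor = [row[:y] + row[y + 1 :] for row in m[1:]]
        total += (-1) ** y * m[0][y] * _det(minor )
    return total
assert _det([[2, 1], [1, 2]]) == 3  # 2*2 - 1*1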
| 710 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
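# Example invocation (paths are illustrative placeholders, not from the
# source):
#
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf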
| 675 | 0 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
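# How the one-liner above works: the lambda receives its own source text as a
# template; "%r" re-embeds that text (with quotes, via repr) and "%%" escapes
# to a literal "%", so the printed program reproduces itself (up to quote
# style, since repr emits single quotes rather than the triple quotes used
# here).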
| 711 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 0 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert ( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
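# Example CLI call via python-fire (file names are illustrative placeholders):
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin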
| 712 |
def __SCREAMING_SNAKE_CASE ( a , b ) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
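# Quick check (my own example): 5 is 0b101 and 3 is 0b011; ORing each aligned
# bit pair gives 0b111.
#
#   >>> __SCREAMING_SNAKE_CASE(5, 3)
#   '0b111'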
| 675 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 713 |
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key ( message , key ) -> str:
    '''simple docstring'''
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
        key += key[i]
        i += 1
    return key
def cipher_text ( message , key_new ) -> str:
    '''simple docstring'''
    cipher_text = ''''''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text
def original_text ( cipher_text , key_new ) -> str:
    '''simple docstring'''
    or_txt = ''''''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
def main ( ) -> None:
    '''simple docstring'''
    message = '''THE GERMAN ATTACK'''
    key = '''SECRET'''
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(f"Encrypted Text = {s}" )
    print(f"Original Text = {original_text(s , key_new )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
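# Tiny worked example (my own numbers): with message "HI" and key "AB",
# generate_key returns "AB"; encryption maps H-A=7 -> "H" and I-B=7 -> "H"
# (ciphertext "HH"), and decryption adds the key back: H+A=7 -> "H",
# H+B=8 -> "I".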
| 675 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCAmelCase = True
except ImportError:
lowerCAmelCase = False
try:
from torch.hub import _get_torch_home
lowerCAmelCase = _get_torch_home()
except ImportError:
lowerCAmelCase = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
lowerCAmelCase = os.path.join(torch_cache_home, """transformers""")
lowerCAmelCase = """https://cdn.huggingface.co"""
lowerCAmelCase = """https://s3.amazonaws.com/models.huggingface.co/bert"""
lowerCAmelCase = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
lowerCAmelCase = os.path.join(PATH, """config.yaml""")
lowerCAmelCase = os.path.join(PATH, """attributes.txt""")
lowerCAmelCase = os.path.join(PATH, """objects.txt""")
lowerCAmelCase = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
lowerCAmelCase = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCAmelCase = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
lowerCAmelCase = """pytorch_model.bin"""
lowerCAmelCase = """config.yaml"""
def __SCREAMING_SNAKE_CASE ( lowercase_=OBJECTS , lowercase_=ATTRIBUTES ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__UpperCAmelCase : Optional[int] = []
with open(lowercase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = OrderedDict()
with open(lowercase_ , '''rb''' ) as f:
__UpperCAmelCase : Any = pkl.load(lowercase_ )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : Optional[int] = ckp.pop(lowercase_ )
if isinstance(lowercase_ , np.ndarray ):
__UpperCAmelCase : List[Any] = torch.tensor(lowercase_ )
else:
assert isinstance(lowercase_ , torch.tensor ), type(lowercase_ )
__UpperCAmelCase : str = v
return r
class lowerCamelCase :
_lowerCAmelCase : Dict = {}
def __init__( self , lowercase__ , lowercase__ = "root" , lowercase__=0):
__UpperCAmelCase : Dict = name
__UpperCAmelCase : Optional[Any] = level
__UpperCAmelCase : Dict = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[Any] = copy.deepcopy(lowercase__)
__UpperCAmelCase : List[str] = copy.deepcopy(lowercase__)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = Config(lowercase__ , name=lowercase__ , level=level + 1)
__UpperCAmelCase : Union[str, Any] = v
setattr(self , lowercase__ , lowercase__)
__UpperCAmelCase : int = d
def __repr__( self):
return str(list((self._pointer.keys())))
def __setattr__( self , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = val
__UpperCAmelCase : str = val
__UpperCAmelCase : Optional[int] = key.split('''.''')
__UpperCAmelCase : List[str] = len(lowercase__) - 1
__UpperCAmelCase : Tuple = self._pointer
if len(lowercase__) > 1:
for i, l in enumerate(lowercase__):
if hasattr(self , lowercase__) and isinstance(getattr(self , lowercase__) , lowercase__):
setattr(getattr(self , lowercase__) , '''.'''.join(levels[i:]) , lowercase__)
if l == last_level:
__UpperCAmelCase : Optional[Any] = val
else:
__UpperCAmelCase : Tuple = pointer[l]
def A( self):
return self._pointer
def A( self , lowercase__ , lowercase__):
with open(F"{file_name}" , '''w''') as stream:
dump(lowercase__ , lowercase__)
def A( self , lowercase__ , lowercase__):
with open(F"{file_name}" , '''w''') as stream:
json.dump(lowercase__ , lowercase__)
@staticmethod
def A( lowercase__):
with open(lowercase__) as stream:
__UpperCAmelCase : Any = load(lowercase__ , Loader=lowercase__)
return data
def __str__( self):
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Dict = F"{t * (self._level-1)}{self._name}:\n"
else:
__UpperCAmelCase : Dict = """"""
__UpperCAmelCase : List[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(lowercase__ , lowercase__):
r += F"{t * (self._level)}{v}\n"
self._level += 1
else:
r += F"{t * (self._level)}{k}: {v} ({type(lowercase__).__name__})\n"
__UpperCAmelCase : Any = level
return r[:-1]
@classmethod
def A( cls , lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[int] = cls.get_config_dict(lowercase__ , **lowercase__)
return cls(lowercase__)
@classmethod
def A( cls , lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[Any] = kwargs.pop('''cache_dir''' , lowercase__)
__UpperCAmelCase : int = kwargs.pop('''force_download''' , lowercase__)
__UpperCAmelCase : Optional[Any] = kwargs.pop('''resume_download''' , lowercase__)
__UpperCAmelCase : str = kwargs.pop('''proxies''' , lowercase__)
__UpperCAmelCase : int = kwargs.pop('''local_files_only''' , lowercase__)
if os.path.isdir(lowercase__):
__UpperCAmelCase : int = os.path.join(lowercase__ , lowercase__)
elif os.path.isfile(lowercase__) or is_remote_url(lowercase__):
__UpperCAmelCase : int = pretrained_model_name_or_path
else:
__UpperCAmelCase : List[str] = hf_bucket_url(lowercase__ , filename=lowercase__ , use_cdn=lowercase__)
try:
# Load from URL or cache if already cached
__UpperCAmelCase : str = cached_path(
lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , proxies=lowercase__ , resume_download=lowercase__ , local_files_only=lowercase__ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Tuple = Config.load_yaml(lowercase__)
except EnvironmentError:
__UpperCAmelCase : Any = """Can't load config for"""
raise EnvironmentError(lowercase__)
if resolved_config_file == config_file:
print('''loading configuration file from path''')
else:
print('''loading configuration file cache''')
return Config.load_yaml(lowercase__), kwargs
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = torch.load('''dump.pt''' , map_location=in_tensor.device )
__UpperCAmelCase : Optional[int] = in_tensor.numpy()
__UpperCAmelCase : List[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowercase_ , lowercase_ , rtol=0.0_1 , atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowercase_ , lowercase_ , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[str] = urlparse(lowercase_ )
return parsed.scheme in ("http", "https")
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=True ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[Any] = """/""" not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=0 , lowercase_=None , ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Tuple = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase_ , lowercase_ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowercase_ , lowercase_ ) for k, v in user_agent.items() )
elif isinstance(lowercase_ , lowercase_ ):
ua += "; " + user_agent
__UpperCAmelCase : Any = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : List[str] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : int = requests.get(lowercase_ , stream=lowercase_ , proxies=lowercase_ , headers=lowercase_ )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : Optional[Any] = response.headers.get('''Content-Length''' )
__UpperCAmelCase : int = resume_size + int(lowercase_ ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit='''B''' , unit_scale=lowercase_ , total=lowercase_ , initial=lowercase_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase_ ) )
temp_file.write(lowercase_ )
progress.close()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False , lowercase_=None , lowercase_=10 , lowercase_=False , lowercase_=None , lowercase_=False , ) -> int:
'''simple docstring'''
if cache_dir is None:
__UpperCAmelCase : Tuple = TRANSFORMERS_CACHE
if isinstance(lowercase_ , lowercase_ ):
__UpperCAmelCase : Optional[Any] = str(lowercase_ )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
__UpperCAmelCase : Union[str, Any] = None
if not local_files_only:
try:
__UpperCAmelCase : int = requests.head(lowercase_ , allow_redirects=lowercase_ , proxies=lowercase_ , timeout=lowercase_ )
if response.status_code == 200:
__UpperCAmelCase : Any = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : int = url_to_filename(lowercase_ , lowercase_ )
# get cache path to put the file
__UpperCAmelCase : Union[str, Any] = os.path.join(lowercase_ , lowercase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase_ ):
return cache_path
else:
__UpperCAmelCase : Tuple = [
file
for file in fnmatch.filter(os.listdir(lowercase_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowercase_ ) > 0:
return os.path.join(lowercase_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowercase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : Tuple = cache_path + """.lock"""
with FileLock(lowercase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : Optional[Any] = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(lowercase_ , '''a+b''' ) as f:
yield f
__UpperCAmelCase : Tuple = _resumable_file_manager
if os.path.exists(lowercase_ ):
__UpperCAmelCase : Optional[Any] = os.stat(lowercase_ ).st_size
else:
__UpperCAmelCase : Dict = 0
else:
__UpperCAmelCase : int = partial(tempfile.NamedTemporaryFile , dir=lowercase_ , delete=lowercase_ )
__UpperCAmelCase : Dict = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , lowercase_ , temp_file.name , )
http_get(
lowercase_ , lowercase_ , proxies=lowercase_ , resume_size=lowercase_ , user_agent=lowercase_ , )
os.replace(temp_file.name , lowercase_ )
__UpperCAmelCase : str = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Dict = cache_path + """.json"""
with open(lowercase_ , '''w''' ) as meta_file:
json.dump(lowercase_ , lowercase_ )
return cache_path
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Any = url.encode('''utf-8''' )
__UpperCAmelCase : List[Any] = shaaaa(lowercase_ )
__UpperCAmelCase : Union[str, Any] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : str = etag.encode('''utf-8''' )
__UpperCAmelCase : Dict = shaaaa(lowercase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False , lowercase_=None , lowercase_=False , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=False , ) -> List[str]:
'''simple docstring'''
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(lowercase_ , lowercase_ ):
__UpperCAmelCase : Optional[Any] = str(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
__UpperCAmelCase : str = str(lowercase_ )
if is_remote_url(lowercase_ ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Optional[int] = get_from_cache(
lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , proxies=lowercase_ , resume_download=lowercase_ , user_agent=lowercase_ , local_files_only=lowercase_ , )
elif os.path.exists(lowercase_ ):
# File, and it exists.
__UpperCAmelCase : Any = url_or_filename
elif urlparse(lowercase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowercase_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowercase_ ) )
if extract_compressed_file:
if not is_zipfile(lowercase_ ) and not tarfile.is_tarfile(lowercase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase : str = os.path.split(lowercase_ )
__UpperCAmelCase : Optional[int] = output_file.replace('''.''' , '''-''' ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(lowercase_ , lowercase_ )
if os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : Tuple = output_path + """.lock"""
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ , ignore_errors=lowercase_ )
os.makedirs(lowercase_ )
if is_zipfile(lowercase_ ):
with ZipFile(lowercase_ , '''r''' ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
elif tarfile.is_tarfile(lowercase_ ):
__UpperCAmelCase : Tuple = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowercase_ ) )
return output_path_extracted
return output_path
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_="," ) -> int:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
if os.path.isfile(lowercase_ ):
with open(lowercase_ ) as f:
__UpperCAmelCase : Dict = eval(f.read() )
else:
__UpperCAmelCase : Dict = requests.get(lowercase_ )
try:
            __UpperCAmelCase : Optional[Any] = req.json()
except Exception:
__UpperCAmelCase : str = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : Optional[Any] = eval(lowercase_ )
except Exception:
__UpperCAmelCase : Optional[int] = data.split('''\n''' )
req.close()
return data
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : str = requests.get(lowercase_ )
__UpperCAmelCase : Any = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase_ )
with open(lowercase_ , '''rb''' ) as stream:
__UpperCAmelCase : Dict = pkl.load(lowercase_ )
__UpperCAmelCase : Tuple = weights.pop('''model''' )
__UpperCAmelCase : Union[str, Any] = {}
for k, v in model.items():
__UpperCAmelCase : List[str] = torch.from_numpy(lowercase_ )
if "running_var" in k:
__UpperCAmelCase : List[Any] = torch.tensor([0] )
__UpperCAmelCase : Optional[int] = k.replace('''running_var''' , '''num_batches_tracked''' )
__UpperCAmelCase : Any = zero
return new
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
print(f"{os.path.abspath(os.path.join(lowercase_ , os.pardir ) )}/demo.ipynb" )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_="RGB" ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
if os.path.isfile(lowercase_ ):
__UpperCAmelCase : Tuple = cva.imread(lowercase_ )
else:
__UpperCAmelCase : Optional[int] = get_image_from_url(lowercase_ )
assert img is not None, f"could not connect to: {im}"
__UpperCAmelCase : Union[str, Any] = cva.cvtColor(lowercase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase : Dict = img[:, :, ::-1]
return img
def __SCREAMING_SNAKE_CASE ( images , batch=1 ) -> Tuple:
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
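# The batching helper above yields fixed-size slices, e.g. (my own
# illustration) five images with batch=2 come out as
# [imgs[0:2], imgs[2:4], imgs[4:5]].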
| 714 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
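# Per class, IoU = area_intersect / area_union and accuracy =
# area_intersect / area_label; the three histograms above bucket pixel
# counts by class id over the masked (non-ignored) pixels.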
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
    __UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.float64 )
    __UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.float64 )
    __UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.float64 )
    __UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.float64 )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[str] = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
__UpperCAmelCase : List[Any] = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Dict = model.state_dict()
def to_tf_var_name(lowercase_ ):
for patt, repl in iter(SCREAMING_SNAKE_CASE_ ):
__UpperCAmelCase : Tuple = name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return f"bert/{name}"
def create_tf_var(lowercase_ , lowercase_ , lowercase_ ):
__UpperCAmelCase : int = tf.dtypes.as_dtype(tensor.dtype )
__UpperCAmelCase : List[str] = tf.get_variable(dtype=SCREAMING_SNAKE_CASE_ , shape=tensor.shape , name=SCREAMING_SNAKE_CASE_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(SCREAMING_SNAKE_CASE_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__UpperCAmelCase : Union[str, Any] = to_tf_var_name(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Optional[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__UpperCAmelCase : Any = torch_tensor.T
__UpperCAmelCase : Any = create_tf_var(tensor=SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ , session=SCREAMING_SNAKE_CASE_ )
tf.keras.backend.set_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : str = session.run(SCREAMING_SNAKE_CASE_ )
print(f"Successfully created {tf_name}: {np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}" )
__UpperCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
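# Note: the kernels listed in tensors_to_transpose are transposed on export
# because tf.keras stores Dense kernels as (in_features, out_features) while
# torch.nn.Linear weights are (out_features, in_features).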
def __SCREAMING_SNAKE_CASE ( lowercase_=None ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Directory in which to save tensorflow model''' )
__UpperCAmelCase : str = parser.parse_args(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=SCREAMING_SNAKE_CASE_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
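# Example invocation (hypothetical paths; the script name is an assumption):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./bert-tf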
| 715 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
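# Rolling-hash update used above: with base b = alphabet_size and window
# length m = p_len, the next window hash is
#   ((old_hash - ord(text[i]) * b**(m - 1)) * b + ord(text[i + m])) % modulus,
# so the whole search runs in O(t_len + p_len) expected time.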
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( GLPNImageProcessor ):
    def __init__( self , *lowercase__ , **lowercase__):
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''' , FutureWarning , )
        super().__init__(*lowercase__ , **lowercase__)
| 716 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
__UpperCAmelCase : str = ValueError('''a should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
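# The pointers i, j and k each track the last list entry whose multiple by
# 2, 3 or 5 is still <= the current maximum; appending the minimum of the
# three candidate multiples keeps the list sorted, and the `<=` comparisons
# skip values equal to the last element, avoiding duplicates.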
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
'''simple docstring'''
if any(not isinstance(lowercase_ , lowercase_ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(lowercase_ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(lowercase_ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
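# Bead sort ("gravity sort") moves the surplus of each taller rod onto its
# right neighbour one pass at a time; n passes over n rods give O(n^2)
# worst-case time, and only non-negative integers are supported (hence the
# type check above).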
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( PretrainedConfig ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
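        # Note (assumption from the upstream REALM defaults): retriever_proj_size
        # is the width of the projected query and candidate embeddings that the
        # retriever scores by inner product.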
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = len(__lowerCAmelCase )
print('''The following activities are selected:''' )
# The first activity is always selected
__UpperCAmelCase : Union[str, Any] = 0
print(__lowerCAmelCase , end=''',''' )
# Consider rest of the activities
for j in range(__lowerCAmelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__lowerCAmelCase , end=''',''' )
__UpperCAmelCase : Union[str, Any] = j
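# Note: this greedy strategy is only guaranteed optimal when `finish` is
# sorted in non-decreasing order, as it is in the example run below.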
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = [1, 3, 0, 5, 8, 5]
lowerCAmelCase = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 718 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
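    # The setattr calls above point every datasets cache path at a temporary
    # directory, so tests never read from or write to the user's real
    # ~/.cache/huggingface tree.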
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Any = LxmertConfig.from_json_file(lowercase__ )
print(f"Building PyTorch model from configuration: {config}" )
__UpperCAmelCase : List[str] = LxmertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 719 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
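# The binary search above returns the index of the first negative entry in a
# row sorted in non-increasing order (or the row length if none is negative),
# so each row costs O(log n) instead of a linear scan.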
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase = 250_004
lowerCAmelCase = 250_020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
_lowerCAmelCase : int = MBartTokenizer
_lowerCAmelCase : Optional[int] = MBartTokenizerFast
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : str = True
def A( self):
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Optional[int] = MBartTokenizer(__a , keep_accents=__a)
tokenizer.save_pretrained(self.tmpdirname)
def A( self):
__UpperCAmelCase : List[str] = MBartTokenizer(__a , keep_accents=__a)
__UpperCAmelCase : List[str] = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__UpperCAmelCase : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def A( self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase : str = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a)
__UpperCAmelCase : str = self.tokenizer_class.from_pretrained(__a , **__a)
__UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
__UpperCAmelCase : Tuple = tokenizer_r.save_pretrained(__a)
__UpperCAmelCase : Optional[Any] = tokenizer_p.save_pretrained(__a)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
__UpperCAmelCase : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
self.assertSequenceEqual(__a , __a)
# Checks everything loads correctly in the same way
__UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(__a)
__UpperCAmelCase : Optional[int] = tokenizer_p.from_pretrained(__a)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a)
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a)
__UpperCAmelCase : Any = tokenizer_p.save_pretrained(__a)
# Checks it save with the same files
self.assertSequenceEqual(__a , __a)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(__a)
__UpperCAmelCase : Optional[int] = tokenizer_p.from_pretrained(__a)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a))
shutil.rmtree(__a)
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase : Dict = tempfile.mkdtemp()
__UpperCAmelCase : Any = tokenizer_r.save_pretrained(__a , legacy_format=__a)
__UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(__a)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
__UpperCAmelCase : Tuple = tokenizer_r.from_pretrained(__a)
__UpperCAmelCase : Dict = tokenizer_p.from_pretrained(__a)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a))
shutil.rmtree(__a)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
_lowerCAmelCase : Dict = '''facebook/mbart-large-en-ro'''
_lowerCAmelCase : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
_lowerCAmelCase : Optional[int] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
_lowerCAmelCase : Optional[int] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def A( cls):
__UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''')
__UpperCAmelCase : List[Any] = 1
return cls
def A( self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0)
def A( self):
__UpperCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a)
def A( self):
self.assertIn(__a , self.tokenizer.all_special_ids)
__UpperCAmelCase : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
__UpperCAmelCase : Tuple = self.tokenizer.decode(__a , skip_special_tokens=__a)
__UpperCAmelCase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a)
self.assertEqual(__a , __a)
self.assertNotIn(self.tokenizer.eos_token , __a)
def A( self):
__UpperCAmelCase : str = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __a)
__UpperCAmelCase : Dict = 1_0
__UpperCAmelCase : Union[str, Any] = self.tokenizer(__a , max_length=__a , truncation=__a).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __a)
self.assertEqual(len(__a) , __a)
def A( self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) , [2_5_0_0_2_6, 2_5_0_0_0_1])
def A( self):
__UpperCAmelCase : str = tempfile.mkdtemp()
__UpperCAmelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a)
__UpperCAmelCase : Optional[int] = MBartTokenizer.from_pretrained(__a)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a)
@require_torch
def A( self):
__UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors='''pt''')
__UpperCAmelCase : Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def A( self):
__UpperCAmelCase : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
__UpperCAmelCase : Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
self.assertIsInstance(__a , __a)
self.assertEqual((2, 1_4) , batch.input_ids.shape)
self.assertEqual((2, 1_4) , batch.attention_mask.shape)
__UpperCAmelCase : Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def A( self):
__UpperCAmelCase : Any = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors='''pt''')
__UpperCAmelCase : int = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=1_0 , return_tensors='''pt''')
__UpperCAmelCase : Optional[int] = targets["""input_ids"""]
__UpperCAmelCase : Optional[int] = shift_tokens_right(__a , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)
@require_torch
def A( self):
__UpperCAmelCase : List[str] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''')
self.assertEqual(
nested_simplify(__a) , {
# A, test, EOS, en_XX
'''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
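    # _LazyModule defers importing tokenization_tapex until TapexTokenizer is
    # first accessed, which keeps the top-level package import cheap.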
| 675 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=3_0 , lowercase__=2 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=2 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_0 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=None , ):
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : Any = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : int = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : int = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : str = (image_size // patch_size) ** 2
__UpperCAmelCase : List[str] = num_patches + 1
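        # With the tester defaults above (image_size=30, patch_size=2), this is
        # (30 // 2) ** 2 = 225 patches plus one [CLS] token, i.e. seq_length == 226.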
def A( self):
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def A( self):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = TFViTModel(config=UpperCAmelCase__)
__UpperCAmelCase : Any = model(UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# Test with an image with different size than the one specified in config.
__UpperCAmelCase : Dict = self.image_size // 2
__UpperCAmelCase : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
__UpperCAmelCase : Optional[Any] = model(UpperCAmelCase__ , interpolate_pos_encoding=UpperCAmelCase__ , training=UpperCAmelCase__)
__UpperCAmelCase : Optional[int] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = self.type_sequence_label_size
__UpperCAmelCase : int = TFViTForImageClassification(UpperCAmelCase__)
__UpperCAmelCase : str = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# Test with an image with different size than the one specified in config.
__UpperCAmelCase : Union[str, Any] = self.image_size // 2
__UpperCAmelCase : Dict = pixel_values[:, :, :image_size, :image_size]
__UpperCAmelCase : Optional[Any] = model(UpperCAmelCase__ , interpolate_pos_encoding=UpperCAmelCase__ , training=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Union[str, Any] = TFViTForImageClassification(UpperCAmelCase__)
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCAmelCase : List[str] = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCAmelCase : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_lowerCAmelCase : Tuple = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase : Dict = False
_lowerCAmelCase : str = False
_lowerCAmelCase : Tuple = False
def A( self):
__UpperCAmelCase : str = TFViTModelTester(self)
__UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def A( self):
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(UpperCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
__UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , tf.keras.layers.Layer))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(UpperCAmelCase__)
__UpperCAmelCase : Dict = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def A( self):
__UpperCAmelCase : str = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
self.assertIsNotNone(UpperCAmelCase__)
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None
@slow
def A( self):
__UpperCAmelCase : Any = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : Any = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''')
# forward pass
__UpperCAmelCase : str = model(**UpperCAmelCase__)
# verify the logits
__UpperCAmelCase : Tuple = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
__UpperCAmelCase : int = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6])
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)
| 721 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
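        # The slice comparison above is the KV-cache check: decoding the appended
        # tokens with past_key_values must match (atol=1e-3) the hidden states
        # obtained by re-encoding the full concatenated sequence.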
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
        # Define PAD Token = EOS Token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained( self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model( self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label( self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_lm_head_model( self):
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        input_ids = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
        output = model(input_ids)[0]
        vocab_size = 4_2_3_8_4
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@slow
    def test_biogpt_generation( self):
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(torch_device)
        output_ids = model.generate(
            **tokenized , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True)
        expected_output_str = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(output_str , expected_output_str)
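# --- Added illustration (not part of the original test file) ---
# A minimal sketch of why the batching test above uses left-padding: with
# right-padding, a decoder-only model would have to generate across pad tokens
# sitting between the prompt and the new tokens. Toy example with plain lists;
# the token ids are made up.
_pad = 0
_prompts = [[5, 6, 7], [8]]
_max_len = max(len(p) for p in _prompts)
_left_padded = [[_pad] * (_max_len - len(p)) + p for p in _prompts]
_attention_mask = [[int(t != _pad) for t in row] for row in _left_padded]
assert _left_padded == [[5, 6, 7], [0, 8]]
assert _attention_mask == [[1, 1, 1], [0, 1]]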
| 675 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
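# --- Added illustration (not part of the original test file) ---
# A small runnable sketch of the masking convention used by the helper above:
# positions equal to the (hypothetical) pad token id get mask False, everything
# else True. Requires torch, like the rest of this module.
_demo_pad_token_id = 1
_demo_input_ids = torch.tensor([[5, 7, 1, 1], [3, 1, 1, 1]])
_demo_mask = _demo_input_ids.ne(_demo_pad_token_id)
assert _demo_mask.tolist() == [[True, True, False, False], [True, False, False, False]]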
class MaMaaaModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def get_config( self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict['''input_ids''']
        attention_mask = inputs_dict['''attention_mask''']
        head_mask = inputs_dict['''head_mask''']
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-2))
    def check_encoder_decoder_model_standalone( self , config , inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_a = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''])[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_a = decoder(
            input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_save_load_strict( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_a, info = model_class.from_pretrained(tmpdirname , output_loading_info=True)
            self.assertEqual(info['''missing_keys'''] , [])
    def test_decoder_model_past_with_large_inputs( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs['''input_ids''']
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs['''input_ids''']
                decoder_input_ids = inputs.get('''decoder_input_ids''' , encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop('''decoder_input_ids''' , None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs['''inputs_embeds'''] = wte(input_ids)
            else:
                inputs['''inputs_embeds'''] = wte(encoder_input_ids)
                inputs['''decoder_inputs_embeds'''] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16( self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask)
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3)
def _long_tensor( tok_lst ) -> Any:
    '''simple docstring'''
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests( unittest.TestCase ):
@cached_property
def A( self):
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''')
    def test_inference_no_head( self):
        model = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''').to(torch_device)
        input_ids = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]])
        decoder_input_ids = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 1_1, 1_0_2_4))
        self.assertEqual(output.shape , expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE))
    def test_inference_head( self):
        model = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''').to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]])
        decoder_input_ids = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 1_1, model.config.vocab_size))
        self.assertEqual(output.shape , expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE))
    def test_seq_to_seq_generation( self):
        model = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''').to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''')
        src_fr = [
            '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
            '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
            '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
            ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
            ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr , padding=True , return_tensors='''pt''')
        hypotheses_batch = model.generate(
            input_ids=dct['''input_ids'''].to(torch_device) , attention_mask=dct['''attention_mask'''].to(torch_device) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''') , )
        expected_en = [
            '''The NSA case highlights the total absence of intelligence debate''',
            '''I think there are two levels of response from the French government.''',
            '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
            ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
            ''' communications in France.''',
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True)
        assert generated == expected_en
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig( PretrainedConfig ):
    model_type = '''bert'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ])
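# --- Added illustration (not part of the original file) ---
# A minimal runnable sketch (assumption: mirrors the `inputs` property above) of
# the axis mapping an ONNX exporter receives for the default task: every input
# is dynamic over batch (axis 0) and sequence (axis 1).
_demo_dynamic_axis = {0: '''batch''', 1: '''sequence'''}
_demo_inputs = OrderedDict(
    [
        ('''input_ids''', _demo_dynamic_axis),
        ('''attention_mask''', _demo_dynamic_axis),
        ('''token_type_ids''', _demo_dynamic_axis),
    ])
assert list(_demo_inputs) == ['''input_ids''', '''attention_mask''', '''token_type_ids''']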
| 675 | 0 |
from __future__ import annotations
def all_unique( collection: list ) -> bool:
    '''simple docstring'''
    return len(set(collection ) ) == len(collection )
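# --- Added examples (not part of the original file) ---
# Quick sanity checks for the helper above.
assert all_unique([1, 2, 3]) is True
assert all_unique([1, 2, 2]) is False
assert all_unique([]) is True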
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster( vectors , noofclusters ):
    '''simple docstring'''
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('''float64''' , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('''int32''' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('''float''' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('''float''' , [dim] )
        vb = tf.placeholder('''float''' , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('''float''' , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
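# --- Added usage sketch (not part of the original file) ---
# The function above targets the TensorFlow 1.x graph API (tf.Session,
# tf.placeholder, tf.sub, tf.initialize_all_variables) and will not run on
# TensorFlow 2.x without the tf.compat.v1 shims. Hypothetical call, assuming a
# TF 1.x environment:
#
#   points = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]])
#   centroids, assignments = TFKMeansCluster(points, 2)
#   # the two tight groups of points should land in different clusters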
| 675 | 0 |
def solution( length = 50 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
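# --- Added worked example (not part of the original file) ---
# The recurrence above counts tilings of a row of `length` unit cells with unit
# squares plus tiles of length 2, 3 and 4 (cf. Project Euler problem 117, which
# states there are exactly fifteen such tilings for a row of length five).
assert solution(5) == 15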
if __name__ == "__main__":
print(F'{solution() = }')
| 702 |
from __future__ import annotations
def maximum_non_adjacent_sum( nums ) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
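# --- Added examples (not part of the original file) ---
# 1 + 3 and 5 + 7 + 6 are the best non-adjacent picks below.
assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18
assert maximum_non_adjacent_sum([]) == 0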
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 0 |
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("""nan""")
class Tee:
    '''A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)'''
    def __init__( self , filename):
        self.stdout = sys.stdout
        self.file = open(filename , '''a''')
    def __getattr__( self , attr):
        return getattr(self.stdout , attr)
    def write( self , msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r'''^.*\r''' , '''''' , msg , 0 , re.M))
def get_original_command( max_width=80 , full_python_path=False ) -> str:
    '''simple docstring'''
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f"{key}={val}" )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += f"{cmd.pop(0 )} "
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
def get_base_command( args , output_dir ) -> List[Any]:
    '''simple docstring'''
    # unwrap multi-line input
    args.base_cmd = re.sub(r'''[\\\n]+''' , ''' ''' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ) -> Any:
    '''simple docstring'''
    # enable this branch to debug everything but the run itself, fast
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 1_0.3_1, 1_0_0.2, 5_5.6_6_6_6, 2_2_2.2_2_2_2_2_2_2_2] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('''STDOUT''' , result.stdout )
        print('''STDERR''' , result.stderr )
    # save the streams
    prefix = variation.replace(''' ''' , '''-''' )
    with open(Path(output_dir ) / f"log.{prefix}.stdout.txt" , '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f"log.{prefix}.stderr.txt" , '''w''' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}
    with io.open(f"{output_dir}/all_results.json" , '''r''' , encoding='''utf-8''' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ) -> List[Any]:
    '''simple docstring'''
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f"{outcome} {mean_target}"
        if len(results ) > 1:
            results_str += f" {tuple(round(x , 2 ) for x in results )}"
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions() -> Dict:
    '''simple docstring'''
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results( results , target_metric_key , report_metric_keys , base_variation , output_dir ) -> List[str]:
    '''simple docstring'''
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value ) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def main() -> List[Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
    print(f"\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt" )
    print(f"and this script\'s output is also piped into {report_fn}" )
    sys.stdout = Tee(report_fn )
    print(f"\n*** Running {len(variations )} benchmarks:" )
    print(f"Base command: {' '.join(base_cmd )}" )
    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
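# --- Added illustration (not part of the original script) ---
# How the --variations argument expands: each quoted argument is one dimension,
# split on "|", and the dimensions are combined with a cartesian product. For
# '|--fp16|--bf16' and '|--tf32' this yields six benchmark variations.
_demo_dims = [["", "--fp16", "--bf16"], ["", "--tf32"]]
_demo_variations = list(map(str.strip , map(''' '''.join , itertools.product(*_demo_dims))))
assert _demo_variations == ["", "--tf32", "--fp16", "--fp16 --tf32", "--bf16", "--bf16 --tf32"]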
| 703 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
    def test_small_model_pt( self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
    def test_small_model_tf( self):
pass
@slow
@require_torch
    def test_large_model_pt( self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output) , [
                {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
@unittest.skip('''No models are available in TF''')
    def test_large_model_tf( self):
pass
| 675 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline( DiffusionPipeline ):
    def __init__( self , prior , image_encoder , image_processor , scheduler , renderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device)
@property
    def _execution_device( self):
        if self.device != torch.device('''meta''') or not hasattr(self.image_encoder , '''_hf_hook'''):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module , '''_hf_hook''')
                and hasattr(module._hf_hook , '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list) and isinstance(image[0] , torch.Tensor):
            image = torch.cat(image , axis=0) if image[0].ndim == 4 else torch.stack(image , axis=0)
        if not isinstance(image , torch.Tensor):
            image = self.image_processor(image , return_tensors='''pt''').pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype , device=device)
        image_embeds = self.image_encoder(image)['''last_hidden_state''']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self , image , num_images_per_prompt = 1 , num_inference_steps = 2_5 , generator = None , latents = None , guidance_scale = 4.0 , frame_size = 6_4 , output_type = "pil" , return_dict = True , ):
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image , list) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t)
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self , '''final_offload_hook''') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
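# --- Added illustration (not part of the original pipeline file) ---
# The classifier-free guidance update used in `__call__` above, in isolation:
# the batch holds [unconditional, conditional] predictions and the final noise
# estimate is uncond + scale * (cond - uncond).
_demo_uncond = torch.zeros(1 , 4)
_demo_cond = torch.ones(1 , 4)
_demo_scale = 4.0
_demo_guided = _demo_uncond + _demo_scale * (_demo_cond - _demo_uncond)
assert torch.allclose(_demo_guided , torch.full((1, 4) , 4.0))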
| 704 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline( DiffusionPipeline ):
    def __init__( self , transformer , vae , scheduler , idalabel = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(''','''):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids( self , label):
        if not isinstance(label , list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__( self , class_labels , guidance_scale = 4.0 , generator = None , num_inference_steps = 5_0 , output_type = "pil" , return_dict = True , ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device).reshape(-1)
        class_null = torch.tensor([1_0_0_0] * batch_size , device=self.device)
        class_labels_input = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half] , dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps , float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps , len(eps) // 2 , dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0)
                noise_pred = torch.cat([eps, rest] , dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred , latent_channels , dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2 , dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
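# --- Added illustration (not part of the original pipeline file) ---
# The learned-sigma split used in `__call__` above, in isolation: when the
# transformer predicts 2 * latent_channels output channels, the first half is
# the noise estimate and the second half parameterizes the variance.
_demo_latent_channels = 4
_demo_noise_pred = torch.randn(1 , 2 * _demo_latent_channels , 8 , 8)
_demo_eps, _demo_rest = torch.split(_demo_noise_pred , _demo_latent_channels , dim=1)
assert _demo_eps.shape == _demo_rest.shape == (1, _demo_latent_channels, 8, 8)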
| 675 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self):
        model = torch.nn.Linear(1_0 , 1_0)
        optimizer = torch.optim.SGD(model.parameters() , 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def prepare_images() -> list:
    '''Load two sample images from the image-utils fixtures dataset.'''
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    image_a = Image.open(dataset[4]['''file'''] )
    image_b = Image.open(dataset[5]['''file'''] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
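# Hedged sketch of the colour-quantization idea behind ImageGPT's ``clusters`` (assumed
# behaviour, not the exact library implementation): each normalized RGB pixel is mapped to the
# index of its nearest cluster centroid, turning an image into a sequence of integer tokens.
def _pixels_to_tokens(pixels , clusters):
    # pixels: (N, 3) floats in [-1, 1]; clusters: (K, 3) centroids -> (N,) int token ids
    d_sq = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return d_sq.argmin(axis=-1)

_toy_clusters = np.asarray([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
_toy_pixels = np.asarray([[-0.9, -0.8, -1.0], [0.7, 0.9, 1.0]])
assert _pixels_to_tokens(_toy_pixels , _toy_clusters).tolist() == [0, 1]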
| 675 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=2 , lowercase__=2_4 , lowercase__=1_6 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_0 , lowercase__=0.0_2 , lowercase__=None , lowercase__=2 , lowercase__=2 , ):
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : str = batch_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : List[Any] = max_length
__UpperCAmelCase : Tuple = num_mel_bins
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : Tuple = use_labels
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Dict = frequency_stride
__UpperCAmelCase : str = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__UpperCAmelCase : Tuple = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__UpperCAmelCase : str = (self.max_length - self.patch_size) // self.time_stride + 1
__UpperCAmelCase : Dict = frequency_out_dimension * time_out_dimension
__UpperCAmelCase : str = num_patches + 2
def A( self):
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : str = self.get_config()
return config, input_values, labels
def A( self):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = ASTModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase : Any = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Any = False
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def A( self):
__UpperCAmelCase : List[Any] = ASTModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(lowercase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(lowercase__)
__UpperCAmelCase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
@slow
def A( self):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = ASTModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def prepare_audio() -> tuple:
    '''Download a sample .flac file and return (waveform, sampling_rate).'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
if is_torchaudio_available()
else None
)
@slow
def A( self):
__UpperCAmelCase : Dict = self.default_feature_extractor
__UpperCAmelCase : Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(lowercase__)
__UpperCAmelCase : List[str] = self.default_feature_extractor
__UpperCAmelCase , __UpperCAmelCase : Any = prepare_audio()
__UpperCAmelCase : List[Any] = audio.squeeze().numpy()
__UpperCAmelCase : Any = feature_extractor(lowercase__ , sampling_rate=lowercase__ , return_tensors='''pt''').to(lowercase__)
# forward pass
with torch.no_grad():
__UpperCAmelCase : Any = model(**lowercase__)
# verify the logits
__UpperCAmelCase : Tuple = torch.Size((1, 5_2_7))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : List[str] = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4))
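# Worked example of the patch bookkeeping from ASTModelTester above: with the tester defaults
# (num_mel_bins=16, max_length=24, patch_size=2, both strides 2) the encoder sees
# 8 * 12 = 96 patches plus the [CLS] and distillation tokens, i.e. a sequence length of 98.
def _ast_seq_length(num_mel_bins=1_6 , max_length=2_4 , patch_size=2 , frequency_stride=2 , time_stride=2):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2

assert _ast_seq_length() == 9_8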
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 0 |
def catalan_number(number: int ) -> int:
    '''Return the ``number``-th Catalan number, counting from catalan_number(1) == 1.

    >>> catalan_number(1)
    1
    >>> catalan_number(5)
    14
    '''
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    current_number = 1
    # apply the recurrence C(i) = C(i-1) * (4*i - 2) // (i + 1)
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
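# Hedged cross-check: the loop implements the recurrence C(i) = C(i-1) * (4i - 2) // (i + 1),
# which follows from the closed form C(m) = comb(2m, m) // (m + 1); catalan_number(n) therefore
# equals comb(2(n - 1), n - 1) // n.
import math

for _n in range(1 , 1_0):
    assert catalan_number(_n) == math.comb(2 * (_n - 1) , _n - 1) // _n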
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height , width , scale_factor=8 ) -> tuple:
    '''Map a pixel height/width to the corresponding latent height/width, rounding up.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
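# Quick sanity check for the helper above (scale_factor=8 matches the default movq factor used
# by this pipeline): 768x768 pixels map to 96x96 latents, and non-multiples round up.
assert downscale_height_and_width(7_6_8 , 7_6_8) == (9_6, 9_6)
assert downscale_height_and_width(7_6_8 + 1 , 7_6_8) == (1_0_4, 9_6)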
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
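# --- Hedged aside (standalone sketch, shapes are illustrative): the guidance step in the loop
# above for a UNet that also predicts a variance head. The channel dim holds [noise, variance];
# only the noise half is blended, and the conditional variance prediction is kept for the
# scheduler. The helper name is made up for this sketch.
def _guidance_with_learned_variance(noise_pred , latent_channels , guidance_scale):
    noise, variance = noise_pred.split(latent_channels , dim=1)
    noise_uncond, noise_text = noise.chunk(2)  # batch = [unconditional, conditional]
    _, variance_text = variance.chunk(2)
    guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
    return torch.cat([guided, variance_text] , dim=1)

assert _guidance_with_learned_variance(torch.randn(2 , 8 , 4 , 4) , 4 , 4.0).shape == (1, 8, 4, 4)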
| 675 | 0 |
import argparse
lowerCAmelCase = """docs/source/_static/js/custom.js"""
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
'''simple docstring'''
with open(__UpperCamelCase , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase : Tuple = f.readlines()
__UpperCAmelCase : Dict = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
__UpperCAmelCase : int = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
lowerCAmelCase = parser.parse_args()
update_custom_js(args.version)
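# Hedged illustration (standalone, file contents made up) of the custom.js shape the updater
# expects and of the line-scanning it performs: a stable-version constant followed by a
# versionMapping object that new releases are appended to.
def _demo_update(lines , version):
    index = 0
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    while not lines[index].startswith("}"):
        index += 1
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    return lines

_lines = [
    'const stableVersion = "v4.0.0"\n',
    "const versionMapping = {\n",
    '    "v4.0.0": "v4.0.0",\n',
    "}\n",
]
_updated = _demo_update(list(_lines) , "4.1.0")
assert _updated[0] == 'const stableVersion = "v4.1.0"\n'
assert '"v4.1.0": "v4.1.0",' in _updated[2]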
| 708 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
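# Hedged sketch of the layer-selection idea behind create_student_by_copying_alternating_layers
# (the helper below is illustrative; the real implementation may use a lookup table): choose
# ``num_student`` evenly spaced teacher layer indices, always keeping the first and the last.
def _pick_layers_to_copy(num_student , num_teacher):
    if num_student == 1:
        return [0]
    step = (num_teacher - 1) / (num_student - 1)
    return [round(i * step) for i in range(num_student)]

assert _pick_layers_to_copy(1 , 1_2) == [0]
assert _pick_layers_to_copy(2 , 1_2) == [0, 1_1]
assert _pick_layers_to_copy(4 , 1_2) == [0, 4, 7, 1_1]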
| 675 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
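# Hedged, minimal sketch of the lazy-import trick _LazyModule relies on (illustrative only,
# not the transformers implementation): a module-level __getattr__ (PEP 562) defers the real
# import until an attribute is first requested. The toy mapping below is made up.
import importlib
_LAZY_ATTRS = {"dumps": "json"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]) , name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")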
| 709 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
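# Worked example for the property above: with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature extractor downsamples by 5 * 2**6 = 320,
# i.e. one encoder frame per 320 waveform samples (20 ms at a 16 kHz sampling rate).
assert functools.reduce(operator.mul , (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , 1) == 3_2_0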
| 675 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = torch.device("""cpu""")
def prepare_img() -> "Image.Image":
    '''Download the standard COCO test image used by the conversion sanity check.'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ) -> torch.Tensor:
    '''Return the expected first five logits for the given SwiftFormer variant.'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key(dct , old , new ) -> None:
    '''Pop ``old`` from ``dct`` and store its value under ``new``.'''
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ) -> list:
    '''Build (old_key, new_key) pairs mapping original checkpoint keys to HF module names.'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
        if ".dwconv" in k:
            k_new = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
        if ".Proj." in k:
            k_new = k_new.replace('''.Proj.''' , '''.proj.''' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            ls = k_new.split('''.''' )
            if ls[2].isdigit():
                k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
            else:
                k_new = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> None:
    '''Convert an original SwiftFormer checkpoint to the HF SwiftFormer structure and save it.'''
__UpperCAmelCase : List[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__UpperCAmelCase : Dict = 1000
__UpperCAmelCase : Optional[int] = """huggingface/label-files"""
__UpperCAmelCase : Optional[Any] = """imagenet-1k-id2label.json"""
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase : Any = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[Any] = idalabel
__UpperCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__UpperCAmelCase : Dict = [3, 3, 6, 4]
__UpperCAmelCase : Optional[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__UpperCAmelCase : List[str] = [3, 3, 9, 6]
__UpperCAmelCase : List[str] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__UpperCAmelCase : Optional[Any] = [4, 3, 10, 5]
__UpperCAmelCase : Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__UpperCAmelCase : List[str] = [4, 4, 12, 6]
__UpperCAmelCase : Any = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
__UpperCAmelCase : Any = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' , check_hash=snake_case_ )
else:
__UpperCAmelCase : Union[str, Any] = torch.load(snake_case_ , map_location='''cpu''' )
__UpperCAmelCase : List[str] = checkpoint
__UpperCAmelCase : List[Any] = create_rename_keys(snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# load HuggingFace model
__UpperCAmelCase : Union[str, Any] = SwiftFormerForImageClassification(snake_case_ ).eval()
hf_model.load_state_dict(snake_case_ )
# prepare test inputs
__UpperCAmelCase : Tuple = prepare_img()
__UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
__UpperCAmelCase : Optional[Any] = processor(images=snake_case_ , return_tensors='''pt''' )
# compare outputs from both models
__UpperCAmelCase : Optional[int] = get_expected_output(snake_case_ )
__UpperCAmelCase : Union[str, Any] = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , snake_case_ , atol=1e-3 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
lowerCAmelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 710 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name ) -> MobileNetVaConfig:
    '''Derive a MobileNetV1 config (depth multiplier, image size, labels) from the model name.'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase : int = {int(k ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def convert_mobilenet_va_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''Convert an original TensorFlow MobileNetV1 checkpoint to the HF structure and save it.'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
    convert_mobilenet_va_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
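# Hedged note on the label shift in get_mobilenet_va_config above: TF MobileNetV1 heads emit
# 1001 logits with index 0 reserved for "background", so every ImageNet-1k id is moved up by
# one. A tiny illustration with a made-up two-class subset:
_subset = {"0": "tench", "1": "goldfish"}
_shifted = {int(k) + 1: v for k, v in _subset.items()}
_shifted[0] = "background"
assert _shifted == {0: "background", 1: "tench", 2: "goldfish"}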
| 675 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowerCAmelCase = get_tests_dir("""fixtures/vocab.json""")
lowerCAmelCase = get_tests_dir("""fixtures""")
class lowerCamelCase ( unittest.TestCase ):
_lowerCAmelCase : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def A( self):
__UpperCAmelCase : List[Any] = 0
def A( self):
__UpperCAmelCase : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''')
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
def A( self):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[Any] = WavaVecaConfig()
__UpperCAmelCase : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''')
# save in new folder
model_config.save_pretrained(UpperCamelCase__)
processor.save_pretrained(UpperCamelCase__)
__UpperCAmelCase : Any = AutoProcessor.from_pretrained(UpperCamelCase__)
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
def A( self):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__))
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json'''))
__UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__)
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
def A( self):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = WavaVecaFeatureExtractor()
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''')
__UpperCAmelCase : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__)
# save in new folder
processor.save_pretrained(UpperCamelCase__)
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__) , '''r''') as f:
__UpperCAmelCase : str = json.load(UpperCamelCase__)
config_dict.pop('''processor_class''')
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__) , '''w''') as f:
f.write(json.dumps(UpperCamelCase__))
__UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__)
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
def A( self):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor()
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''')
__UpperCAmelCase : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__)
# save in new folder
processor.save_pretrained(UpperCamelCase__)
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__) , '''r''') as f:
__UpperCAmelCase : List[Any] = json.load(UpperCamelCase__)
config_dict.pop('''processor_class''')
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__) , '''w''') as f:
f.write(json.dumps(UpperCamelCase__))
__UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__)
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
def A( self):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''')
model_config.save_pretrained(UpperCamelCase__)
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json'''))
# create emtpy sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__) , '''w''') as f:
f.write('''{}''')
__UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__)
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
def A( self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__):
__UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__):
__UpperCAmelCase : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__)
__UpperCAmelCase : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__)
self.assertTrue(processor.special_attribute_present)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
__UpperCAmelCase : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
__UpperCAmelCase : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCAmelCase : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__)
__UpperCAmelCase : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present)
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def A( self):
try:
AutoConfig.register('''custom''' , UpperCamelCase__)
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__)
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__)
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__)
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCAmelCase : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : str = os.path.join(UpperCamelCase__ , '''vocab.txt''')
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__UpperCAmelCase : str = CustomTokenizer(UpperCamelCase__)
__UpperCAmelCase : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__)
__UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__)
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A( self):
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase : Union[str, Any] = False
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase : str = False
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase : Optional[int] = '''AutoFeatureExtractor'''
_lowerCAmelCase : Any = '''AutoTokenizer'''
_lowerCAmelCase : Dict = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__)
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__)
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__)
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__)
# If remote code is not set, the default is to use local classes.
__UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''')
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote code is disabled, we load the local ones.
__UpperCAmelCase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub.
__UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertTrue(processor.special_attribute_present)
self.assertTrue(processor.feature_extractor.special_attribute_present)
self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A( self):
__UpperCAmelCase : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''')
def A( self):
__UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''')
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''')
@is_staging_test
class lowerCamelCase ( unittest.TestCase ):
_lowerCAmelCase : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def A( cls):
__UpperCAmelCase : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__)
@classmethod
def A( cls):
try:
delete_repo(token=cls._token , repo_id='''test-processor''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''')
except HTTPError:
pass
def A( self):
__UpperCAmelCase : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''') , push_to_hub=UpperCamelCase__ , use_auth_token=self._token)
__UpperCAmelCase : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor")
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def A( self):
__UpperCAmelCase : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''') , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
__UpperCAmelCase : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''')
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def A( self):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__UpperCAmelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''')
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
__UpperCAmelCase : Union[str, Any] = CustomTokenizer(UpperCamelCase__)
__UpperCAmelCase : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token)
__UpperCAmelCase : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token)
processor.save_pretrained(UpperCamelCase__)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''')) as f:
__UpperCAmelCase : Optional[int] = json.load(UpperCamelCase__)
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''')))
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''')))
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''')))
repo.push_to_hub()
__UpperCAmelCase : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__)
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''')
| 711 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
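    # Illustrative note: the method below returns an independent deep copy of this
    # config, so mutating nested fields of the copy leaves the original untouched.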
def A( self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 675 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """spm_char.model"""}
lowerCAmelCase = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
lowerCAmelCase = {
"""microsoft/speecht5_asr""": 1_024,
"""microsoft/speecht5_tts""": 1_024,
"""microsoft/speecht5_vc""": 1_024,
}
class lowerCamelCase ( lowerCAmelCase__ ):
_lowerCAmelCase : Any = VOCAB_FILES_NAMES
_lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : str = ["input_ids", "attention_mask"]
def __init__( self , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__ = None , **lowercase__ , ):
__UpperCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
__UpperCAmelCase : List[str] = vocab_file
__UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowerCamelCase)
@property
def A( self):
return self.sp_model.get_piece_size()
def A( self):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
__UpperCAmelCase : int = self.__dict__.copy()
__UpperCAmelCase : Dict = None
return state
def __setstate__( self , lowercase__):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A( self , lowercase__):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase)
def A( self , lowercase__):
return self.sp_model.piece_to_id(_lowerCamelCase)
def A( self , lowercase__):
__UpperCAmelCase : Tuple = self.sp_model.IdToPiece(_lowerCamelCase)
return token
def A( self , lowercase__):
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase) + token
__UpperCAmelCase : Tuple = []
else:
current_sub_tokens.append(_lowerCamelCase)
out_string += self.sp_model.decode(_lowerCamelCase)
return out_string.strip()
def A( self , lowercase__ , lowercase__=None):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A( self , lowercase__ , lowercase__ = None , lowercase__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase)
__UpperCAmelCase : str = [1]
if token_ids_a is None:
return ([0] * len(_lowerCamelCase)) + suffix_ones
return ([0] * len(_lowerCamelCase)) + ([0] * len(_lowerCamelCase)) + suffix_ones
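    # Illustrative mask layout: for a single sequence of length n the mask is
    # [0] * n + [1], the trailing 1 marking the EOS token appended when building
    # inputs with special tokens above.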
def A( self , lowercase__ , lowercase__ = None):
if not os.path.isdir(_lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(_lowerCamelCase , '''wb''') as fi:
__UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase)
return (out_vocab_file,)
| 712 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
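# Worked example (illustrative): for a = 25 (0b11001) and b = 32 (0b100000) the
# operands are zero-padded to 011001 and 100000, giving "0b111001".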
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
with open(__snake_case ) as metadata_file:
__UpperCAmelCase : str = json.load(__snake_case )
__UpperCAmelCase : Optional[Any] = LukeConfig(use_entity_aware_attention=__snake_case , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
__UpperCAmelCase : Any = torch.load(__snake_case , map_location='''cpu''' )["module"]
# Load the entity vocab file
__UpperCAmelCase : Optional[int] = load_original_entity_vocab(__snake_case )
# add an entry for [MASK2]
__UpperCAmelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__UpperCAmelCase : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
__UpperCAmelCase : List[Any] = AddedToken('''<ent>''' , lstrip=__snake_case , rstrip=__snake_case )
__UpperCAmelCase : List[Any] = AddedToken('''<ent2>''' , lstrip=__snake_case , rstrip=__snake_case )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , '''tokenizer_config.json''' ) , '''r''' ) as f:
__UpperCAmelCase : str = json.load(__snake_case )
__UpperCAmelCase : Optional[Any] = "MLukeTokenizer"
with open(os.path.join(__snake_case , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__snake_case , __snake_case )
__UpperCAmelCase : List[Any] = MLukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
__UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
__UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
__UpperCAmelCase : Tuple = state_dict["embeddings.word_embeddings.weight"]
__UpperCAmelCase : Dict = word_emb[ent_init_index].unsqueeze(0 )
__UpperCAmelCase : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
__UpperCAmelCase : Any = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__UpperCAmelCase : Dict = state_dict[bias_name]
__UpperCAmelCase : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__UpperCAmelCase : str = decoder_bias[enta_init_index].unsqueeze(0 )
__UpperCAmelCase : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
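    # (In the original LUKE conversion script these copies populate the w2e_, e2w_
    # and e2e_ query projections, each initialized from the shared word-to-word
    # query weights; the obfuscated assignments below hide the target keys.)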
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__UpperCAmelCase : int = f"encoder.layer.{layer_index}.attention.self."
__UpperCAmelCase : Dict = state_dict[prefix + matrix_name]
__UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
__UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__UpperCAmelCase : List[str] = state_dict["entity_embeddings.entity_embeddings.weight"]
__UpperCAmelCase : Dict = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__UpperCAmelCase : List[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__UpperCAmelCase : Union[str, Any] = state_dict["entity_predictions.bias"]
__UpperCAmelCase : Tuple = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__UpperCAmelCase : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__UpperCAmelCase : List[Any] = LukeForMaskedLM(config=__snake_case ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
__UpperCAmelCase : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
__UpperCAmelCase : Tuple = state_dict[key]
else:
__UpperCAmelCase : Optional[int] = state_dict[key]
__UpperCAmelCase : str = model.load_state_dict(__snake_case , strict=__snake_case )
if set(__snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__UpperCAmelCase : int = MLukeTokenizer.from_pretrained(__snake_case , task='''entity_classification''' )
__UpperCAmelCase : Any = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__UpperCAmelCase : str = (0, 9)
__UpperCAmelCase : Optional[Any] = tokenizer(__snake_case , entity_spans=[span] , return_tensors='''pt''' )
__UpperCAmelCase : List[Any] = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCAmelCase : int = torch.Size((1, 33, 768) )
__UpperCAmelCase : List[Any] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCAmelCase : int = torch.Size((1, 1, 768) )
__UpperCAmelCase : Optional[int] = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__UpperCAmelCase : Union[str, Any] = MLukeTokenizer.from_pretrained(__snake_case )
__UpperCAmelCase : Optional[int] = "Tokyo is the capital of <mask>."
__UpperCAmelCase : List[Any] = (24, 30)
__UpperCAmelCase : Any = tokenizer(__snake_case , entity_spans=[span] , return_tensors='''pt''' )
__UpperCAmelCase : Optional[Any] = model(**__snake_case )
__UpperCAmelCase : int = encoding["input_ids"][0].tolist()
__UpperCAmelCase : Optional[int] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
__UpperCAmelCase : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__snake_case )
__UpperCAmelCase : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__UpperCAmelCase : List[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__snake_case ) )
model.save_pretrained(__snake_case )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["[MASK]", "[PAD]", "[UNK]"]
__UpperCAmelCase : List[Any] = [json.loads(__snake_case ) for line in open(__snake_case )]
__UpperCAmelCase : Tuple = {}
for entry in data:
__UpperCAmelCase : str = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__UpperCAmelCase : List[str] = entity_id
break
__UpperCAmelCase : Optional[Any] = f"{language}:{entity_name}"
__UpperCAmelCase : Dict = entity_id
return new_mapping
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 713 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
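# Worked example (illustrative, assuming readable names generate_key(message, key)):
# generate_key("THE GERMAN ATTACK", "SECRET") repeats the key to the message
# length, yielding "SECRETSECRETSECRE".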
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
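# Note: decryption mirrors encryption exactly, so for uppercase messages
# original_text(cipher_text(message, key), key) == message (readable names assumed).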
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 0 |
from datetime import datetime as dt
import os
from github import Github
lowerCAmelCase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
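# Issues carrying any of the labels above are exempt from auto-closing and stale marking.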
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : Any = Github(os.environ['''GITHUB_TOKEN'''] )
__UpperCAmelCase : str = g.get_repo('''huggingface/transformers''' )
__UpperCAmelCase : Optional[int] = repo.get_issues(state='''open''' )
for issue in open_issues:
        __UpperCAmelCase : Dict = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=a__ )
__UpperCAmelCase : str = comments[0] if len(a__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 714 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
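# For each class c, IoU(c) = area_intersect[c] / area_union[c]; these per-image
# areas are accumulated over the whole dataset in total_intersect_and_union below.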
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
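# np.nanmean skips classes that never occur (0 / 0 yields NaN), so absent
# categories do not drag down mean_iou or mean_accuracy.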
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
__UpperCAmelCase : Union[str, Any] = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
__UpperCAmelCase : Optional[int] = f"{src_lang}-{tgt_lang}"
__UpperCAmelCase : int = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
__UpperCAmelCase : int = os.path.join(UpperCamelCase__ , '''README.md''' )
print(f"Generating {path}" )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
lowerCAmelCase = Path(__file__).resolve().parent.parent.parent
lowerCAmelCase = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = model_name.split("""-""")
lowerCAmelCase = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 715 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
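# Usage sketch (illustrative): with pattern "abc" and text "xxabcxx" the rolling
# hash matches at index 2 and the function returns True in expected
# O(len(text) + len(pattern)) time.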
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCAmelCase = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
lowerCAmelCase = parser.parse_args()
logger.info(F'Loading data from {args.data_file}')
with open(args.data_file, """rb""") as fp:
lowerCAmelCase = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
lowerCAmelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase = v
logger.info(F'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 716 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        __UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
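# Worked example (illustrative): for n_element = 10 the returned list is
# [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]: every element factors into only 2, 3 and 5.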
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
_lowerCAmelCase : Union[str, Any] = KandinskyImgaImgPipeline
_lowerCAmelCase : List[Any] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
_lowerCAmelCase : Union[str, Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_lowerCAmelCase : Union[str, Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowerCAmelCase : Tuple = False
@property
def A( self):
return 3_2
@property
def A( self):
return 3_2
@property
def A( self):
return self.time_input_dim
@property
def A( self):
return self.time_input_dim * 4
@property
def A( self):
return 1_0_0
@property
def A( self):
__UpperCAmelCase : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
return tokenizer
@property
def A( self):
torch.manual_seed(0)
__UpperCAmelCase : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__UpperCAmelCase : int = MultilingualCLIP(__a)
__UpperCAmelCase : Any = text_encoder.eval()
return text_encoder
@property
def A( self):
torch.manual_seed(0)
__UpperCAmelCase : Optional[Any] = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCAmelCase : List[str] = UNetaDConditionModel(**__a)
return model
@property
def A( self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A( self):
torch.manual_seed(0)
__UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs)
return model
def A( self):
__UpperCAmelCase : Tuple = self.dummy_text_encoder
__UpperCAmelCase : Union[str, Any] = self.dummy_tokenizer
__UpperCAmelCase : List[str] = self.dummy_unet
__UpperCAmelCase : List[str] = self.dummy_movq
__UpperCAmelCase : str = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__UpperCAmelCase : List[str] = DDIMScheduler(**__a)
__UpperCAmelCase : Any = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A( self , lowercase__ , lowercase__=0):
__UpperCAmelCase : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a)).to(__a)
__UpperCAmelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(__a)
# create init_image
__UpperCAmelCase : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__a)).to(__a)
__UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1)[0]
__UpperCAmelCase : Optional[int] = Image.fromarray(np.uinta(__a)).convert('''RGB''').resize((2_5_6, 2_5_6))
if str(__a).startswith('''mps'''):
__UpperCAmelCase : Optional[int] = torch.manual_seed(__a)
else:
__UpperCAmelCase : Any = torch.Generator(device=__a).manual_seed(__a)
__UpperCAmelCase : Union[str, Any] = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def A( self):
__UpperCAmelCase : Tuple = 'cpu'
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = self.pipeline_class(**__a)
__UpperCAmelCase : Tuple = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
__UpperCAmelCase : List[str] = pipe(**self.get_dummy_inputs(__a))
__UpperCAmelCase : List[str] = output.images
__UpperCAmelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(__a) , return_dict=__a , )[0]
__UpperCAmelCase : int = image[0, -3:, -3:, -1]
__UpperCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCAmelCase : List[str] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def A( self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A( self):
__UpperCAmelCase : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''')
__UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
__UpperCAmelCase : int = 'A red cartoon frog, 4k'
__UpperCAmelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa)
pipe_prior.to(__a)
__UpperCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa)
__UpperCAmelCase : Any = pipeline.to(__a)
pipeline.set_progress_bar_config(disable=__a)
__UpperCAmelCase : Dict = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCAmelCase : Optional[int] = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__UpperCAmelCase : List[Any] = pipeline(
__a , image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
__UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__a , __a)
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = create_model(
'''HTSAT-tiny''' , '''roberta''' , _lowercase , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=_lowercase , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Any = r'''.*sequential.(\d+).*'''
__UpperCAmelCase : Tuple = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__UpperCAmelCase : Optional[int] = key.replace(_lowercase , _lowercase )
if re.match(_lowercase , _lowercase ):
# replace sequential layers with list
__UpperCAmelCase : List[str] = re.match(_lowercase , _lowercase ).group(1 )
            __UpperCAmelCase : int = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(sequential_layer )//3}.linear." )
elif re.match(_lowercase , _lowercase ):
__UpperCAmelCase : str = int(re.match(_lowercase , _lowercase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
            __UpperCAmelCase : Any = 1 if projection_layer == 0 else 2
            __UpperCAmelCase : List[Any] = key.replace(f"_projection.{projection_layer}." , f"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
__UpperCAmelCase : List[str] = value
__UpperCAmelCase : Any = mixed_qkv.size(0 ) // 3
__UpperCAmelCase : Tuple = mixed_qkv[:qkv_dim]
__UpperCAmelCase : Dict = mixed_qkv[qkv_dim : qkv_dim * 2]
__UpperCAmelCase : Dict = mixed_qkv[qkv_dim * 2 :]
__UpperCAmelCase : List[str] = query_layer
__UpperCAmelCase : Tuple = key_layer
__UpperCAmelCase : Optional[Any] = value_layer
else:
__UpperCAmelCase : List[str] = value
return model_state_dict
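# Illustrative renaming (per the original script's intent): "audio_branch.bn0.weight"
# becomes "audio_model.audio_encoder.batch_norm.weight" via KEYS_TO_MODIFY_MAPPING.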
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = init_clap(_lowercase , enable_fusion=_lowercase )
clap_model.eval()
__UpperCAmelCase : int = clap_model.state_dict()
__UpperCAmelCase : Union[str, Any] = rename_state_dict(_lowercase )
__UpperCAmelCase : str = ClapConfig()
__UpperCAmelCase : Optional[Any] = enable_fusion
__UpperCAmelCase : List[str] = ClapModel(_lowercase )
# ignore the spectrogram embedding layer
model.load_state_dict(_lowercase , strict=_lowercase )
model.save_pretrained(_lowercase )
transformers_config.save_pretrained(_lowercase )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 718 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
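    # Any collected test without an explicit "integration" or "unit" marker is
    # treated as a unit test by default.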
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( __snake_case ):
def __init__( self , *lowercase__ , **lowercase__):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase)
| 719 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
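# Worked example (illustrative): for the non-increasing row [4, 3, 2, -1] the
# binary search returns 3, the index of the first negative value.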
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
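# Illustrative cross-check (not part of the original module): the three counting
# strategies above must agree on every grid in ``test_grids``.
def _check_strategies_agree() -> None:
    for sample in test_grids:
        expected = count_negatives_brute_force(sample)
        assert count_negatives_binary_search(sample) == expected
        assert count_negatives_brute_force_with_break(sample) == expected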
| 675 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size_divisor=None, resample=None, do_rescale=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
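# Minimal standalone sketch of the size-divisor rounding performed in resize() above:
# each spatial dimension is floored to the nearest multiple of size_divisor, so a
# 518x389 input with size_divisor=32 maps to 512x384.
def _round_down_to_multiple(height: int, width: int, size_divisor: int = 32) -> tuple:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor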
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
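# Rough standalone sketch of the lazy-import idea behind _LazyModule, assuming a
# package module that can use a PEP 562 module-level __getattr__: the submodule is
# imported only on first attribute access. The mapping below is hypothetical.
import importlib

_LAZY_ATTRS = {"TapexTokenizer": ".tokenization_tapex"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")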
| 675 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['''polics''', '''health'''])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''')
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]})
        # No kwarg
        outputs = classifier('''Who are you voting for in 2020?''' , ['''politics'''])
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]})
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''])
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]})
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''')
        self.assertEqual(
            outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''])) , 1.0)
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''])
        self.assertEqual(
            outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''])) , 1.0)
        outputs = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''')
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['''I am happy'''] , ['''positive''', '''negative'''])
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]}
                for i in range(1)
            ] , )
        outputs = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''])
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]}
                for i in range(2)
            ] , )
        with self.assertRaises(ValueError):
            classifier('''''' , candidate_labels='''politics''')
        with self.assertRaises(TypeError):
            classifier(None , candidate_labels='''politics''')
        with self.assertRaises(ValueError):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''')
        with self.assertRaises(TypeError):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(AttributeError):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=None , )
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1)
        config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0)
        config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0)
        config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''])
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''')
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''')
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
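# How zero-shot scores are typically derived from NLI logits, as a standalone sketch
# (an assumption about the pipeline's internals, not code copied from it). `logits`
# has shape (num_candidate_labels, 3) with columns [contradiction, neutral, entailment].
import numpy as np


def zero_shot_scores(logits, multi_label=False):
    logits = np.asarray(logits, dtype=float)
    if multi_label or len(logits) == 1:
        # Softmax over entailment vs. contradiction, independently for each label.
        ent_contr = logits[:, [0, 2]]
        ent_contr = np.exp(ent_contr - ent_contr.max(axis=1, keepdims=True))
        return (ent_contr / ent_contr.sum(axis=1, keepdims=True))[:, 1]
    # Single-label: softmax over the entailment logits across all candidate labels.
    ent = np.exp(logits[:, 2] - logits[:, 2].max())
    return ent / ent.sum()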
| 721 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': BioGptModel,
            '''text-classification''': BioGptForSequenceClassification,
            '''text-generation''': BioGptForCausalLM,
            '''token-classification''': BioGptForTokenClassification,
            '''zero-shot''': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
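# Why the batching test above sets padding_side="left": decoder-only models append
# new tokens on the right, so prompts must be left-padded to keep every sequence
# ending at the same position. Minimal standalone sketch with plain lists and a
# hypothetical pad id of 0:
def left_pad(batch, pad_id=0):
    width = max(len(seq) for seq in batch)
    return [[pad_id] * (width - len(seq)) + seq for seq in batch]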
| 675 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
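# Minimal sketch of the inactivity windows checked in main(), assuming the naive UTC
# datetimes returned by PyGithub (``dt`` is the alias imported at the top of the file):
def needs_stale_notice(updated_at, created_at) -> bool:
    now = dt.utcnow()
    return (now - updated_at).days > 23 and (now - created_at).days >= 30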
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
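# Illustrative usage of the two classes above (a sketch; it assumes the transformers
# base classes imported at the top of the file resolve normally): the dynamic axes
# declare which input dimensions may vary at ONNX export time.
if __name__ == "__main__":
    onnx_config = BertOnnxConfig(BertConfig(), task="sequence-classification")
    assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}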
| 675 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key

                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''
    Copy/paste/tweak model's weights to transformers design.
    '''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_wav2vec = HubertForCTC(config )
    else:
        hf_wav2vec = HubertModel(config )

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    model = model[0].eval()

    recursively_load_weights(model , hf_wav2vec , is_finetuned )

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
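# Standalone sketch of the dotted-path attribute walk inside set_recursively(),
# assuming plain Python objects: a path like "feature_projection.projection" is
# resolved one attribute at a time.
def resolve_dotted(root, dotted_path):
    obj = root
    for attribute in dotted_path.split("."):
        obj = getattr(obj, attribute)
    return obj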
| 701 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('''float64''' , [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('''int32''')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('''float''' , [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder('''float''' , [dim])
        v2 = tf.placeholder('''float''' , [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1 , v2) , 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('''float''' , [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances , 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
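# The graph above relies on TF1-era APIs (tf.Session, tf.sub, initialize_all_variables).
# For comparison, a sketch of the same expectation/maximization loop in plain numpy:
import numpy as np


def kmeans_numpy(vectors, noofclusters, iterations=100):
    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)].copy()
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation: assign every vector to its nearest centroid.
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned vectors.
        for cluster_n in range(noofclusters):
            members = vectors[assignments == cluster_n]
            if len(members):
                centroids[cluster_n] = members.mean(axis=0)
    return centroids, assignments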
| 675 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( _UpperCAmelCase ):
_lowerCAmelCase : int = '''maskformer'''
_lowerCAmelCase : str = {'''hidden_size''': '''mask_feature_size'''}
_lowerCAmelCase : Any = ['''resnet''', '''swin''']
_lowerCAmelCase : str = ['''detr''']
def __init__( self , lowercase__ = 2_5_6 , lowercase__ = 2_5_6 , lowercase__ = 0.1 , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0.0_2 , lowercase__ = 1.0 , lowercase__ = 1.0 , lowercase__ = 1.0 , lowercase__ = 2_0.0 , lowercase__ = None , **lowercase__ , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__UpperCAmelCase : List[Any] = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = backbone_config.pop('''model_type''')
__UpperCAmelCase : Any = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : Optional[int] = config_class.from_dict(lowercase__)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported)}")
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__UpperCAmelCase : Tuple = DetrConfig()
else:
# verify that the decoder is supported
__UpperCAmelCase : Optional[Any] = (
decoder_config.pop('''model_type''') if isinstance(lowercase__ , lowercase__) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported)}")
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = CONFIG_MAPPING[decoder_type]
__UpperCAmelCase : Tuple = config_class.from_dict(lowercase__)
__UpperCAmelCase : Union[str, Any] = backbone_config
__UpperCAmelCase : int = decoder_config
# main feature dimension for the model
__UpperCAmelCase : Union[str, Any] = fpn_feature_size
__UpperCAmelCase : Optional[Any] = mask_feature_size
# initializer
__UpperCAmelCase : Optional[int] = init_std
__UpperCAmelCase : int = init_xavier_std
# Hungarian matcher && loss
__UpperCAmelCase : str = cross_entropy_weight
__UpperCAmelCase : List[Any] = dice_weight
__UpperCAmelCase : Any = mask_weight
__UpperCAmelCase : Dict = use_auxiliary_loss
__UpperCAmelCase : Dict = no_object_weight
__UpperCAmelCase : Optional[int] = output_auxiliary_logits
__UpperCAmelCase : Union[str, Any] = self.decoder_config.encoder_attention_heads
__UpperCAmelCase : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**lowercase__)
@classmethod
def A( cls , lowercase__ , lowercase__ , **lowercase__):
return cls(
backbone_config=lowercase__ , decoder_config=lowercase__ , **lowercase__ , )
def A( self):
__UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__)
__UpperCAmelCase : List[Any] = self.backbone_config.to_dict()
__UpperCAmelCase : Dict = self.decoder_config.to_dict()
__UpperCAmelCase : int = self.__class__.model_type
return output
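# A hedged usage sketch for the config above: compose it from explicit backbone
# and decoder configs via the classmethod defined above (its upstream name is
# `from_backbone_and_decoder_configs`); the Swin/DETR values are illustrative.
if __name__ == "__main__":
    from transformers import DetrConfig, MaskFormerConfig, SwinConfig

    backbone = SwinConfig(image_size=3_8_4, embed_dim=1_2_8, depths=[2, 2, 1_8, 2], num_heads=[4, 8, 1_6, 3_2])
    decoder = DetrConfig()
    config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
    # to_dict() round-trips backbone_config and decoder_config as plain dicts
    print(sorted(config.to_dict().keys()))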
| 702 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over all subsequences of ``nums`` in which no two
    chosen elements are adjacent.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    # max_including: best sum of a subsequence that includes the current element
    # max_excluding: best sum of a subsequence that excludes it
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None ) -> Dict:
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
__UpperCAmelCase : str = nn.Parameter(SCREAMING_SNAKE_CASE_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
__UpperCAmelCase : List[str] = nn.Parameter(SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : str = np.asarray(weights[0] )
__UpperCAmelCase : Dict = np.asarray(weights[1] )
__UpperCAmelCase : Any = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = np.asarray(weights[0] )
__UpperCAmelCase : Any = np.asarray(weights[1] )
__UpperCAmelCase : Tuple = np.asarray(weights[2] )
__UpperCAmelCase : Any = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = weights[0][0][0]
__UpperCAmelCase : int = np.asarray(layer_norm_a[0] )
__UpperCAmelCase : int = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# lsh weights + output
__UpperCAmelCase : Optional[Any] = weights[0][1]
if len(SCREAMING_SNAKE_CASE_ ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ )
    # intermediate weights
__UpperCAmelCase : List[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE_ ) == 4:
__UpperCAmelCase : Dict = intermediate_weights[2]
# layernorm 2
__UpperCAmelCase : List[str] = np.asarray(intermediate_weights[0][0] )
__UpperCAmelCase : List[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# intermediate dense
__UpperCAmelCase : Union[str, Any] = np.asarray(intermediate_weights[1][0] )
__UpperCAmelCase : List[Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# intermediate out
__UpperCAmelCase : Any = np.asarray(intermediate_weights[4][0] )
__UpperCAmelCase : Tuple = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = torch_model.reformer
# word embeds
__UpperCAmelCase : Tuple = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
if isinstance(weights[3] , SCREAMING_SNAKE_CASE_ ):
__UpperCAmelCase : List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__UpperCAmelCase : int = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"{position_embeddings[emb_idx]} emb does not match"
__UpperCAmelCase : int = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ) )
__UpperCAmelCase : List[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__UpperCAmelCase : Tuple = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# output layer norm
__UpperCAmelCase : Any = np.asarray(weights[7][0] )
__UpperCAmelCase : Union[str, Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# output embeddings
__UpperCAmelCase : List[str] = np.asarray(weights[9][0] )
__UpperCAmelCase : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(f"Building PyTorch model from configuration: {config}" )
__UpperCAmelCase : List[Any] = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
        __UpperCAmelCase : Dict = pickle.load(f)['''weights''']
set_model_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config.hidden_size )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
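# A hedged usage example for the script above (the filename matches the
# upstream transformers conversion script; all paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin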
| 703 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio recording of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
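# A hedged, minimal usage sketch of the pipeline exercised above (the model id
# matches the slow test; the random audio array only demonstrates the call
# signature, so the scores are meaningless):
if __name__ == "__main__":
    import numpy as np
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    audio = np.random.randn(4_8_0_0_0)  # ~1 s of fake audio at CLAP's 48 kHz
    print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"]))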
| 675 | 0 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text``; return the cipher values and the matching random keys."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 3_0_0)
            # (i + k) * k == i * k + k ** 2, so decrypt can recover i exactly
            # as (c - k ** 2) / k.
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert ``encrypt`` given the cipher values and their keys."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 704 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
        # create an ImageNet label -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
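# The block above is the standard transformers lazy-import pattern:
# TYPE_CHECKING gives static tooling real imports, while at runtime the module
# is swapped for a _LazyModule that resolves attributes on first access. A
# self-contained sketch of the same idea using PEP 562 module __getattr__
# (illustrative only; this is not the _LazyModule implementation):
import importlib

_LAZY_ATTRS = {"GPTNeoXJapaneseTokenizer": ".tokenization_gpt_neox_japanese"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")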
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
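# A hedged sketch of what the "clusters" above are for: ImageGPT discretizes an
# image by snapping each normalized RGB pixel to its nearest color cluster and
# using the cluster index as the token id. Illustrative NumPy version:
import numpy as np

def color_quantize(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) values in [-1, 1]; clusters: (k, 3) palette
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)  # one token id per pixel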
| 675 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase ( __a ):
_lowerCAmelCase : "DiagonalGaussianDistribution"
class lowerCamelCase ( __a , __a ):
_lowerCAmelCase : Tuple = True
@register_to_config
def __init__( self , lowercase__ = 3 , lowercase__ = 3 , lowercase__ = ("DownEncoderBlock2D",) , lowercase__ = ("UpDecoderBlock2D",) , lowercase__ = (6_4,) , lowercase__ = 1 , lowercase__ = "silu" , lowercase__ = 4 , lowercase__ = 3_2 , lowercase__ = 3_2 , lowercase__ = 0.1_8_2_1_5 , ):
super().__init__()
# pass init params to Encoder
__UpperCAmelCase : Optional[int] = Encoder(
in_channels=snake_case__ , out_channels=snake_case__ , down_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , double_z=snake_case__ , )
# pass init params to Decoder
__UpperCAmelCase : str = Decoder(
in_channels=snake_case__ , out_channels=snake_case__ , up_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , norm_num_groups=snake_case__ , act_fn=snake_case__ , )
__UpperCAmelCase : Tuple = nn.Convad(2 * latent_channels , 2 * latent_channels , 1)
__UpperCAmelCase : List[str] = nn.Convad(snake_case__ , snake_case__ , 1)
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Union[str, Any] = False
# only relevant if vae tiling is enabled
__UpperCAmelCase : Optional[Any] = self.config.sample_size
__UpperCAmelCase : Optional[Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
__UpperCAmelCase : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
__UpperCAmelCase : Union[str, Any] = 0.2_5
def A( self , lowercase__ , lowercase__=False):
if isinstance(snake_case__ , (Encoder, Decoder)):
__UpperCAmelCase : Tuple = value
def A( self , lowercase__ = True):
__UpperCAmelCase : Tuple = use_tiling
def A( self):
self.enable_tiling(snake_case__)
def A( self):
__UpperCAmelCase : Any = True
def A( self):
__UpperCAmelCase : List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A( self):
__UpperCAmelCase : Dict = {}
def fn_recursive_add_processors(lowercase__ , lowercase__ , lowercase__):
if hasattr(snake_case__ , '''set_processor'''):
__UpperCAmelCase : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , snake_case__ , snake_case__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case__ , snake_case__ , snake_case__)
return processors
def A( self , lowercase__):
__UpperCAmelCase : Optional[Any] = len(self.attn_processors.keys())
if isinstance(snake_case__ , snake_case__) and len(snake_case__) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(snake_case__)} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
def fn_recursive_attn_processor(lowercase__ , lowercase__ , lowercase__):
if hasattr(snake_case__ , '''set_processor'''):
if not isinstance(snake_case__ , snake_case__):
module.set_processor(snake_case__)
else:
module.set_processor(processor.pop(F"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , snake_case__ , snake_case__)
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case__ , snake_case__ , snake_case__)
def A( self):
self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def A( self , lowercase__ , lowercase__ = True):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(snake_case__ , return_dict=snake_case__)
if self.use_slicing and x.shape[0] > 1:
__UpperCAmelCase : Union[str, Any] = [self.encoder(snake_case__) for x_slice in x.split(1)]
__UpperCAmelCase : List[str] = torch.cat(snake_case__)
else:
__UpperCAmelCase : Dict = self.encoder(snake_case__)
__UpperCAmelCase : Dict = self.quant_conv(snake_case__)
__UpperCAmelCase : int = DiagonalGaussianDistribution(snake_case__)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case__)
def A( self , lowercase__ , lowercase__ = True):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(snake_case__ , return_dict=snake_case__)
__UpperCAmelCase : Any = self.post_quant_conv(snake_case__)
__UpperCAmelCase : Any = self.decoder(snake_case__)
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__)
@apply_forward_hook
def A( self , lowercase__ , lowercase__ = True):
if self.use_slicing and z.shape[0] > 1:
__UpperCAmelCase : Dict = [self._decode(snake_case__).sample for z_slice in z.split(1)]
__UpperCAmelCase : str = torch.cat(snake_case__)
else:
__UpperCAmelCase : Dict = self._decode(snake_case__).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=snake_case__)
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = min(a.shape[2] , b.shape[2] , snake_case__)
for y in range(snake_case__):
__UpperCAmelCase : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = min(a.shape[3] , b.shape[3] , snake_case__)
for x in range(snake_case__):
__UpperCAmelCase : List[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
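    # Note on blend_v/blend_h above: each applies a linear cross-fade over the
    # first `blend_extent` rows (resp. columns) of tile `b`, mixing in the
    # trailing rows/columns of its neighbour `a`, so adjacent decoded tiles do
    # not show a visible seam.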
def A( self , lowercase__ , lowercase__ = True):
__UpperCAmelCase : Optional[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
__UpperCAmelCase : Union[str, Any] = int(self.tile_latent_min_size * self.tile_overlap_factor)
__UpperCAmelCase : List[str] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__UpperCAmelCase : Dict = []
for i in range(0 , x.shape[2] , snake_case__):
__UpperCAmelCase : Union[str, Any] = []
for j in range(0 , x.shape[3] , snake_case__):
__UpperCAmelCase : List[str] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__UpperCAmelCase : Dict = self.encoder(snake_case__)
__UpperCAmelCase : Union[str, Any] = self.quant_conv(snake_case__)
row.append(snake_case__)
rows.append(snake_case__)
__UpperCAmelCase : Any = []
for i, row in enumerate(snake_case__):
__UpperCAmelCase : Optional[Any] = []
for j, tile in enumerate(snake_case__):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__UpperCAmelCase : int = self.blend_v(rows[i - 1][j] , snake_case__ , snake_case__)
if j > 0:
__UpperCAmelCase : Any = self.blend_h(row[j - 1] , snake_case__ , snake_case__)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(snake_case__ , dim=3))
__UpperCAmelCase : Any = torch.cat(snake_case__ , dim=2)
__UpperCAmelCase : int = DiagonalGaussianDistribution(snake_case__)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case__)
def A( self , lowercase__ , lowercase__ = True):
__UpperCAmelCase : Any = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
__UpperCAmelCase : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor)
__UpperCAmelCase : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__UpperCAmelCase : int = []
for i in range(0 , z.shape[2] , snake_case__):
__UpperCAmelCase : Optional[Any] = []
for j in range(0 , z.shape[3] , snake_case__):
__UpperCAmelCase : Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__UpperCAmelCase : List[Any] = self.post_quant_conv(snake_case__)
__UpperCAmelCase : Union[str, Any] = self.decoder(snake_case__)
row.append(snake_case__)
rows.append(snake_case__)
__UpperCAmelCase : int = []
for i, row in enumerate(snake_case__):
__UpperCAmelCase : Dict = []
for j, tile in enumerate(snake_case__):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__UpperCAmelCase : Union[str, Any] = self.blend_v(rows[i - 1][j] , snake_case__ , snake_case__)
if j > 0:
__UpperCAmelCase : Tuple = self.blend_h(row[j - 1] , snake_case__ , snake_case__)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(snake_case__ , dim=3))
__UpperCAmelCase : List[str] = torch.cat(snake_case__ , dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__)
def A( self , lowercase__ , lowercase__ = False , lowercase__ = True , lowercase__ = None , ):
__UpperCAmelCase : List[Any] = sample
__UpperCAmelCase : str = self.encode(snake_case__).latent_dist
if sample_posterior:
__UpperCAmelCase : Optional[int] = posterior.sample(generator=snake_case__)
else:
__UpperCAmelCase : Optional[int] = posterior.mode()
__UpperCAmelCase : Any = self.decode(snake_case__).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case__)
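# A hedged usage sketch for the tiling support above (the checkpoint is a
# commonly published VAE and may need adjusting):
if __name__ == "__main__":
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()  # encode/decode large images tile by tile
    with torch.no_grad():
        image = torch.randn(1, 3, 1_0_2_4, 1_0_2_4)
        latents = vae.encode(image).latent_dist.sample()
        reconstruction = vae.decode(latents).sample
    print(reconstruction.shape)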
| 706 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for ``location``."""
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase = """\
"""
lowerCAmelCase = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCAmelCase = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string'''),
}) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def A( self , lowercase__ , lowercase__ , lowercase__ = 1_6 , lowercase__ = True , lowercase__=None):
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
__UpperCAmelCase : Optional[Any] = "cuda"
else:
__UpperCAmelCase : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained(a_)
__UpperCAmelCase : List[str] = model.to(a_)
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained(a_)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__UpperCAmelCase : str = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(a_) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__UpperCAmelCase : List[str] = model.config.max_length - 1
else:
__UpperCAmelCase : int = model.config.max_length
__UpperCAmelCase : Any = tokenizer(
a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , return_tensors='''pt''' , return_attention_mask=a_ , ).to(a_)
__UpperCAmelCase : Dict = encodings["input_ids"]
__UpperCAmelCase : Union[str, Any] = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__UpperCAmelCase : str = []
__UpperCAmelCase : Union[str, Any] = CrossEntropyLoss(reduction='''none''')
for start_index in logging.tqdm(range(0 , len(a_) , a_)):
__UpperCAmelCase : Optional[Any] = min(start_index + batch_size , len(a_))
__UpperCAmelCase : List[str] = encoded_texts[start_index:end_index]
__UpperCAmelCase : Optional[Any] = attn_masks[start_index:end_index]
if add_start_token:
__UpperCAmelCase : Any = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(a_)
__UpperCAmelCase : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
__UpperCAmelCase : Tuple = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(a_), attn_mask] , dim=1)
__UpperCAmelCase : Tuple = encoded_batch
with torch.no_grad():
__UpperCAmelCase : Dict = model(a_ , attention_mask=a_).logits
__UpperCAmelCase : List[str] = out_logits[..., :-1, :].contiguous()
__UpperCAmelCase : Any = labels[..., 1:].contiguous()
__UpperCAmelCase : int = attn_mask[..., 1:].contiguous()
__UpperCAmelCase : Optional[int] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2) , a_) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1))
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a_)}
| 707 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
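# Worked example for downscale_height_and_width above: with height = width = 768
# and scale_factor = 8, 768 // 8**2 == 12 with no remainder, so the latent side
# is 12 * 8 = 96; a height of 769 would instead round up to (12 + 1) * 8 = 104.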
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''convbert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__=7_6_8 , lowercase__=2 , lowercase__=9 , lowercase__=1 , lowercase__=None , **lowercase__ , ):
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ , )
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : Dict = embedding_size
__UpperCAmelCase : str = head_ratio
__UpperCAmelCase : Union[str, Any] = conv_kernel_size
__UpperCAmelCase : int = num_groups
__UpperCAmelCase : Dict = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
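# A hedged usage sketch for the OnnxConfig above, via the (legacy)
# transformers.onnx CLI; the model id and output directory are illustrative:
#
#   python -m transformers.onnx --model=YituTech/conv-bert-base onnx/
#
# The dynamic axes declared above (batch/sequence, plus choice for the
# multiple-choice task) become symbolic dimensions of the exported graph.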
| 708 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
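# A hedged sketch of the layer-copying idea the tests above exercise: an
# e-layer student is built by copying a spread of the teacher's layers (this
# helper is illustrative, not the library's pick_layers_to_copy):
def spread_layers(n_student: int, n_teacher: int) -> list[int]:
    # choose n_student teacher-layer indices, spaced evenly across the stack
    step = n_teacher / n_student
    return [min(round(i * step), n_teacher - 1) for i in range(n_student)]

assert spread_layers(3, 1_2) == [0, 4, 8]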
| 675 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCAmelCase = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class(config_class):
    """Return the checkpoint name mentioned in ``config_class``'s docstring, if any."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/'''):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise if any non-deprecated config class docstring lacks a valid checkpoint."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
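# A standalone sketch of the checkpoint-extraction regex used above (same
# pattern, illustrative variable names):
if __name__ == "__main__":
    import re

    _re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
    docstring = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
    print(_re_ckpt.findall(docstring))
    # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]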
| 709 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
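# The property above reduces the conv strides into the feature extractor's
# total downsampling factor; a standalone cross-check with the default strides:
if __name__ == "__main__":
    import functools
    import operator

    default_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    print(functools.reduce(operator.mul, default_stride, 1))  # 320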
| 675 | 0 |
'''simple docstring'''
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = list(range(len(A_ ) ) )
__UpperCAmelCase : int = [v / w for v, w in zip(A_ , A_ )]
index.sort(key=lambda lowercase_ : ratio[i] , reverse=A_ )
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Any = [0] * len(A_ )
for i in index:
if weight[i] <= capacity:
__UpperCAmelCase : Dict = 1
max_value += value[i]
capacity -= weight[i]
else:
__UpperCAmelCase : Any = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
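# A de-mangled reference sketch of the greedy routine above: sort items by
# value/weight ratio, take whole items while they fit, then a fraction of the
# next one (illustrative names):
def fractional_knapsack_ref(value, weight, capacity):
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

if __name__ == "__main__":
    print(fractional_knapsack_ref([60, 100, 120], [10, 20, 30], 50)[0])  # 240.0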
| 710 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase : int = {int(k ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
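# Example invocation (hypothetical script and checkpoint paths):
#   python convert_mobilenet_v1_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf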
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Dict = str(lowercase_ )
while len(lowercase_ ) != 1:
__UpperCAmelCase : Any = [int(lowercase_ ) for i in num_string]
__UpperCAmelCase : Optional[Any] = 1
for i in range(0 , len(lowercase_ ) ):
total *= numbers[i]
__UpperCAmelCase : List[Any] = str(lowercase_ )
steps += 1
return steps
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Optional[Any] = str(lowercase_ )
while len(lowercase_ ) != 1:
__UpperCAmelCase : str = [int(lowercase_ ) for i in num_string]
__UpperCAmelCase : List[str] = 0
for i in range(0 , len(lowercase_ ) ):
total += numbers[i]
__UpperCAmelCase : int = str(lowercase_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
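# Note that both routines above share one mangled function name, so the second
# definition shadows the first; a standalone sketch of multiplicative
# persistence with an illustrative name:
def multiplicative_persistence_ref(num: int) -> int:
    steps = 0
    while num >= 10:
        product = 1
        for digit in str(num):
            product *= int(digit)
        num, steps = product, steps + 1
    return steps

if __name__ == "__main__":
    print(multiplicative_persistence_ref(39))  # 3  (39 -> 27 -> 14 -> 4)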
| 711 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
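# The same clone-by-deep-copy pattern as a standalone sketch (illustrative
# class name):
if __name__ == "__main__":
    from dataclasses import dataclass, field

    @dataclass
    class Options:
        tags: list = field(default_factory=list)

        def clone(self):
            return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

    a = Options(tags=["x"])
    b = a.clone()
    b.tags.append("y")
    print(a.tags, b.tags)  # ['x'] ['x', 'y']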
| 675 | 0 |
lowerCAmelCase = {
"""km/h""": 1.0,
"""m/s""": 3.6,
"""mph""": 1.60_93_44,
"""knot""": 1.8_52,
}
lowerCAmelCase = {
"""km/h""": 1.0,
"""m/s""": 0.2_77_77_77_78,
"""mph""": 0.6_21_37_11_92,
"""knot""": 0.5_39_95_68_03,
}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float:
'''simple docstring'''
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__UpperCAmelCase : Tuple = (
f"Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n"
f"Valid values are: {', '.join(__snake_case )}"
)
raise ValueError(__snake_case )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
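# Standalone sketch of the formula above: normalise to km/h with the first
# chart, then scale into the target unit with the inverse chart:
KMH_PER_UNIT = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
UNITS_PER_KMH = {"km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803}

def convert_speed_ref(speed: float, unit_from: str, unit_to: str) -> float:
    return round(speed * KMH_PER_UNIT[unit_from] * UNITS_PER_KMH[unit_to], 3)

if __name__ == "__main__":
    print(convert_speed_ref(100, "km/h", "m/s"))  # 27.778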
| 712 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
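# Cross-check: the routine above matches Python's built-in bitwise OR
# formatted with bin():
if __name__ == "__main__":
    a, b = 25, 32
    assert bin(a | b) == "0b111001"
    print(bin(a | b))  # 0b111001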
| 675 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class lowerCamelCase ( lowercase_ ):
_lowerCAmelCase : List[Any] = '''markuplm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__=0 , lowercase__=2 , lowercase__=2_5_6 , lowercase__=1_0_2_4 , lowercase__=2_1_6 , lowercase__=1_0_0_1 , lowercase__=3_2 , lowercase__=5_0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Any = use_cache
__UpperCAmelCase : Any = classifier_dropout
# additional properties
__UpperCAmelCase : str = max_depth
__UpperCAmelCase : Dict = max_xpath_tag_unit_embeddings
__UpperCAmelCase : Dict = max_xpath_subs_unit_embeddings
__UpperCAmelCase : int = tag_pad_id
__UpperCAmelCase : int = subs_pad_id
__UpperCAmelCase : Any = xpath_unit_hidden_size
| 713 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
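# A compact sketch of the same round trip using itertools.cycle to repeat the
# key (this variant subtracts the key letter on encryption and adds it back on
# decryption; spaces pass through without consuming a key letter):
from itertools import cycle
from string import ascii_uppercase as ABC

def encrypt_ref(message: str, key: str) -> str:
    ks = cycle(key)
    return "".join(c if c == " " else ABC[(ABC.index(c) - ABC.index(next(ks))) % 26] for c in message)

def decrypt_ref(cipher: str, key: str) -> str:
    ks = cycle(key)
    return "".join(c if c == " " else ABC[(ABC.index(c) + ABC.index(next(ks))) % 26] for c in cipher)

if __name__ == "__main__":
    assert decrypt_ref(encrypt_ref("THE GERMAN ATTACK", "SECRET"), "SECRET") == "THE GERMAN ATTACK"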
| 675 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class lowerCamelCase ( __lowerCamelCase ):
def __init__( self , *lowercase__ , **lowercase__):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def A( self , lowercase__=None , lowercase__=None , lowercase__=None):
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Optional[Any] = {}
if prompt is not None:
__UpperCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
__UpperCAmelCase : Optional[int] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__UpperCAmelCase : Any = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''')
__UpperCAmelCase : Union[str, Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowercase__ , **lowercase__):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_)
def A( self , lowercase__ , lowercase__=None):
__UpperCAmelCase : Dict = load_image(UpperCamelCase_)
if prompt is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_):
raise ValueError(
F"Received an invalid text input, got - {type(UpperCamelCase_)} - but expected a single string. "
'''Note also that one single text can be provided for conditional image to text generation.''')
__UpperCAmelCase : Any = self.model.config.model_type
if model_type == "git":
__UpperCAmelCase : List[str] = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework)
__UpperCAmelCase : Optional[Any] = self.tokenizer(text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_).input_ids
__UpperCAmelCase : List[Any] = [self.tokenizer.cls_token_id] + input_ids
__UpperCAmelCase : Any = torch.tensor(UpperCamelCase_).unsqueeze(0)
model_inputs.update({'''input_ids''': input_ids})
elif model_type == "pix2struct":
__UpperCAmelCase : int = self.image_processor(images=UpperCamelCase_ , header_text=UpperCamelCase_ , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__UpperCAmelCase : List[str] = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework)
__UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework)
model_inputs.update(UpperCamelCase_)
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation")
else:
__UpperCAmelCase : Any = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
__UpperCAmelCase : Optional[int] = None
return model_inputs
def A( self , lowercase__ , lowercase__=None):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , UpperCamelCase_)
and all(x is None for x in model_inputs['''input_ids'''])
):
__UpperCAmelCase : List[str] = None
if generate_kwargs is None:
__UpperCAmelCase : Union[str, Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__UpperCAmelCase : Optional[int] = model_inputs.pop(self.model.main_input_name)
__UpperCAmelCase : str = self.model.generate(UpperCamelCase_ , **UpperCamelCase_ , **UpperCamelCase_)
return model_outputs
def A( self , lowercase__):
__UpperCAmelCase : Optional[Any] = []
for output_ids in model_outputs:
__UpperCAmelCase : Any = {
'''generated_text''': self.tokenizer.decode(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , )
}
records.append(UpperCamelCase_)
return records
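# Typical use of this pipeline through the public factory function; a minimal
# sketch (downloads a checkpoint on first run, and the model name is a real
# but illustrative choice):
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
    # e.g. [{'generated_text': 'two cats laying on a couch'}]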
| 714 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
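# The heart of intersect_and_union above in plain numpy, for one binary map
# (standalone sketch):
if __name__ == "__main__":
    import numpy as np

    pred = np.array([[0, 1], [1, 1]])
    gt = np.array([[0, 1], [0, 1]])
    intersection = np.logical_and(pred == 1, gt == 1).sum()
    union = np.logical_or(pred == 1, gt == 1).sum()
    print(intersection / union)  # 0.666... = IoU of class 1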
| 675 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=False , ) -> Tuple:
'''simple docstring'''
output_path.parent.mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCamelCase__ , lowerCamelCase__ , f=output_path.as_posix() , input_names=lowerCamelCase__ , output_names=lowerCamelCase__ , dynamic_axes=lowerCamelCase__ , do_constant_folding=lowerCamelCase__ , use_external_data_format=lowerCamelCase__ , enable_onnx_checker=lowerCamelCase__ , opset_version=lowerCamelCase__ , )
else:
export(
lowerCamelCase__ , lowerCamelCase__ , f=output_path.as_posix() , input_names=lowerCamelCase__ , output_names=lowerCamelCase__ , dynamic_axes=lowerCamelCase__ , do_constant_folding=lowerCamelCase__ , opset_version=lowerCamelCase__ , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = False ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCAmelCase : List[Any] = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
__UpperCAmelCase : Any = "cpu"
__UpperCAmelCase : Tuple = Path(lowerCamelCase__ )
# VAE DECODER
__UpperCAmelCase : Optional[int] = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
__UpperCAmelCase : Union[str, Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCAmelCase : Union[str, Any] = vae_decoder.decode
onnx_export(
lowerCamelCase__ , model_args=(
torch.randn(1 , lowerCamelCase__ , 25 , 25 ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=lowerCamelCase__ , )
del vae_decoder
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
lowerCAmelCase = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("""SD: Done: ONNX""")
| 715 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
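# De-mangled reference sketch of the rolling-hash search above (same base-256
# hash modulo 1_000_003; illustrative names):
def rabin_karp_ref(pattern: str, text: str) -> bool:
    base, mod = 256, 1_000_003
    p_len, t_len = len(pattern), len(text)
    if p_len > t_len or p_len == 0:
        return p_len == 0
    p_hash = t_hash = 0
    high = pow(base, p_len - 1, mod)  # weight of the window's leading character
    for i in range(p_len):
        p_hash = (p_hash * base + ord(pattern[i])) % mod
        t_hash = (t_hash * base + ord(text[i])) % mod
    for i in range(t_len - p_len + 1):
        if p_hash == t_hash and text[i : i + p_len] == pattern:
            return True
        if i < t_len - p_len:  # roll the window one character to the right
            t_hash = ((t_hash - ord(text[i]) * high) * base + ord(text[i + p_len])) % mod
    return False

if __name__ == "__main__":
    assert rabin_karp_ref("ABABX", "ABABZABABYABABX")
    assert not rabin_karp_ref("Lue", "Lüsai")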
| 675 | 0 |
class lowerCamelCase :
def __init__( self):
__UpperCAmelCase : Optional[Any] = {}
def A( self):
print(self.vertex)
for i in self.vertex:
print(lowercase_ , ''' -> ''' , ''' -> '''.join([str(lowercase_) for j in self.vertex[i]]))
def A( self , lowercase__ , lowercase__):
if from_vertex in self.vertex:
self.vertex[from_vertex].append(lowercase_)
else:
# else make a new vertex
__UpperCAmelCase : List[Any] = [to_vertex]
def A( self):
__UpperCAmelCase : Any = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_)
def A( self , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = True
print(lowercase_ , end=''' ''')
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_)
if __name__ == "__main__":
lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
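# The same traversal with an explicit stack instead of recursion
# (standalone sketch over a plain adjacency dict):
def dfs_iterative(graph: dict, start: int) -> list:
    seen, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        order.append(node)
        stack.extend(reversed(graph.get(node, [])))  # keep insertion order on pop
    return order

if __name__ == "__main__":
    adj = {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
    print(dfs_iterative(adj, 0))  # [0, 1, 2, 3]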
| 716 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        raise ValueError('''a should be a positive number''' )
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(
__lowerCAmelCase , R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class lowerCamelCase ( __lowerCAmelCase ):
def A( self , lowercase__):
if self.framework == "tf":
__UpperCAmelCase : List[Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
__UpperCAmelCase : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase_)
else:
raise ValueError('''Unsupported framework''')
return masked_index
def A( self , lowercase__):
__UpperCAmelCase : List[str] = self.get_masked_index(lowerCAmelCase_)
__UpperCAmelCase : List[Any] = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def A( self , lowercase__):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCAmelCase_)
def A( self , lowercase__ , lowercase__=None , **lowercase__):
if return_tensors is None:
__UpperCAmelCase : List[Any] = self.framework
__UpperCAmelCase : int = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
self.ensure_exactly_one_mask_token(lowerCAmelCase_)
return model_inputs
def A( self , lowercase__):
__UpperCAmelCase : Optional[int] = self.model(**lowerCAmelCase_)
__UpperCAmelCase : List[Any] = model_inputs['''input_ids''']
return model_outputs
def A( self , lowercase__ , lowercase__=5 , lowercase__=None):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase : int = target_ids.shape[0]
__UpperCAmelCase : str = model_outputs['''input_ids'''][0]
__UpperCAmelCase : str = model_outputs['''logits''']
if self.framework == "tf":
__UpperCAmelCase : str = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
__UpperCAmelCase : List[Any] = outputs.numpy()
__UpperCAmelCase : Any = outputs[0, masked_index, :]
__UpperCAmelCase : Optional[int] = stable_softmax(lowerCAmelCase_ , axis=-1)
if target_ids is not None:
__UpperCAmelCase : Dict = tf.gather_nd(tf.squeeze(lowerCAmelCase_ , 0) , target_ids.reshape(-1 , 1))
__UpperCAmelCase : int = tf.expand_dims(lowerCAmelCase_ , 0)
__UpperCAmelCase : Optional[int] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_)
__UpperCAmelCase , __UpperCAmelCase : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase : Dict = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase_).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase : Tuple = outputs[0, masked_index, :]
__UpperCAmelCase : str = logits.softmax(dim=-1)
if target_ids is not None:
__UpperCAmelCase : Optional[Any] = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase : Tuple = probs.topk(lowerCAmelCase_)
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Dict = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
__UpperCAmelCase : Dict = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase : List[Any] = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase : int = target_ids[p].tolist()
__UpperCAmelCase : str = p
# Filter padding out:
__UpperCAmelCase : str = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase : Union[str, Any] = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
__UpperCAmelCase : Union[str, Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
row.append(lowerCAmelCase_)
result.append(lowerCAmelCase_)
if single_mask:
return result[0]
return result
def A( self , lowercase__ , lowercase__=None):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
__UpperCAmelCase : Optional[int] = [targets]
try:
__UpperCAmelCase : List[str] = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase : str = {}
__UpperCAmelCase : Any = []
for target in targets:
__UpperCAmelCase : Any = vocab.get(lowerCAmelCase_ , lowerCAmelCase_)
if id_ is None:
__UpperCAmelCase : List[Any] = self.tokenizer(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , max_length=1 , truncation=lowerCAmelCase_ , )['''input_ids''']
if len(lowerCAmelCase_) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''')
continue
__UpperCAmelCase : Union[str, Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
target_ids.append(id_)
__UpperCAmelCase : Dict = list(set(lowerCAmelCase_))
if len(lowerCAmelCase_) == 0:
raise ValueError('''At least one target must be provided when passed.''')
__UpperCAmelCase : List[str] = np.array(lowerCAmelCase_)
return target_ids
def A( self , lowercase__=None , lowercase__=None):
__UpperCAmelCase : str = {}
if targets is not None:
__UpperCAmelCase : Tuple = self.get_target_ids(lowerCAmelCase_ , lowerCAmelCase_)
__UpperCAmelCase : Tuple = target_ids
if top_k is not None:
__UpperCAmelCase : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
return {}, {}, postprocess_params
def __call__( self , lowercase__ , *lowercase__ , **lowercase__):
__UpperCAmelCase : Tuple = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and len(lowerCAmelCase_) == 1:
return outputs[0]
return outputs
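# Standard entry point for this pipeline; a minimal sketch (downloads a
# checkpoint on first run, and the model name is a real but illustrative
# choice):
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    print(unmasker("Paris is the <mask> of France.", top_k=2))
    # e.g. [{'token_str': ' capital', 'score': ..., ...}, ...]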
| 717 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
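# Minimal instantiation sketch, overriding one retrieval field (runs once
# transformers is installed):
if __name__ == "__main__":
    from transformers import RealmConfig

    config = RealmConfig(num_candidates=4)
    print(config.num_candidates, config.hidden_size)  # 4 768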
| 675 | 0 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
'''simple docstring'''
if not head:
return True
# split the list to two parts
__UpperCAmelCase : str = head.next, head
while fast and fast.next:
__UpperCAmelCase : Union[str, Any] = fast.next.next
__UpperCAmelCase : Any = slow.next
__UpperCAmelCase : Union[str, Any] = slow.next
__UpperCAmelCase : Tuple = None # Don't forget here! But forget still works!
# reverse the second part
__UpperCAmelCase : List[Any] = None
while second:
__UpperCAmelCase : str = second.next
__UpperCAmelCase : Dict = node
__UpperCAmelCase : str = second
__UpperCAmelCase : List[str] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
__UpperCAmelCase : List[Any] = node.next
__UpperCAmelCase : Optional[int] = head.next
return True
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
__UpperCAmelCase : str = head
while fast and fast.next:
__UpperCAmelCase : Tuple = fast.next.next, slow.next
# 2. Push the second half into the stack
__UpperCAmelCase : List[Any] = [slow.val]
while slow.next:
__UpperCAmelCase : str = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
__UpperCAmelCase : Optional[Any] = cur.next
return True
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = 0
while head:
if head.val in d:
d[head.val].append(lowercase_ )
else:
__UpperCAmelCase : List[Any] = [pos]
__UpperCAmelCase : Optional[Any] = head.next
pos += 1
__UpperCAmelCase : Optional[Any] = pos - 1
__UpperCAmelCase : Tuple = 0
for v in d.values():
if len(lowercase_ ) % 2 != 0:
middle += 1
else:
__UpperCAmelCase : List[str] = 0
for i in range(0 , len(lowercase_ ) ):
if v[i] + v[len(lowercase_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
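# The snippets above assume a singly linked node type roughly like this; a
# standalone sketch pairing it with the simplest O(n)-space check:
class ListNode:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def is_palindrome_list(head) -> bool:
    vals = []
    while head:
        vals.append(head.val)
        head = head.next
    return vals == vals[::-1]

if __name__ == "__main__":
    head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
    print(is_palindrome_list(head))  # True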
| 718 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
_lowerCAmelCase : Any = 4_2
class lowerCamelCase ( nn.Module ):
def __init__( self , lowercase__=3 , lowercase__=3 , lowercase__=("DownEncoderBlock2D",) , lowercase__=(6_4,) , lowercase__=2 , lowercase__=3_2 , lowercase__="silu" , lowercase__=True , ):
super().__init__()
__UpperCAmelCase : int = layers_per_block
__UpperCAmelCase : Optional[int] = torch.nn.Convad(
snake_case__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCAmelCase : Dict = None
__UpperCAmelCase : Tuple = nn.ModuleList([])
# down
__UpperCAmelCase : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(snake_case__):
__UpperCAmelCase : Tuple = output_channel
__UpperCAmelCase : int = block_out_channels[i]
__UpperCAmelCase : str = i == len(snake_case__) - 1
__UpperCAmelCase : Optional[int] = get_down_block(
snake_case__ , num_layers=self.layers_per_block , in_channels=snake_case__ , out_channels=snake_case__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , )
self.down_blocks.append(snake_case__)
# mid
__UpperCAmelCase : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# out
__UpperCAmelCase : str = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case__ , eps=1e-6)
__UpperCAmelCase : Optional[Any] = nn.SiLU()
__UpperCAmelCase : Optional[Any] = 2 * out_channels if double_z else out_channels
__UpperCAmelCase : List[Any] = nn.Convad(block_out_channels[-1] , snake_case__ , 3 , padding=1)
__UpperCAmelCase : List[str] = False
def A( self , lowercase__):
__UpperCAmelCase : Optional[int] = x
__UpperCAmelCase : Union[str, Any] = self.conv_in(snake_case__)
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase__):
def custom_forward(*lowercase__):
return module(*snake_case__)
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0'''):
for down_block in self.down_blocks:
__UpperCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case__) , snake_case__ , use_reentrant=snake_case__)
# middle
__UpperCAmelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , snake_case__ , use_reentrant=snake_case__)
else:
for down_block in self.down_blocks:
__UpperCAmelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__) , snake_case__)
# middle
__UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , snake_case__)
else:
# down
for down_block in self.down_blocks:
__UpperCAmelCase : List[Any] = down_block(snake_case__)
# middle
__UpperCAmelCase : List[Any] = self.mid_block(snake_case__)
# post-process
__UpperCAmelCase : Any = self.conv_norm_out(snake_case__)
__UpperCAmelCase : Dict = self.conv_act(snake_case__)
__UpperCAmelCase : Tuple = self.conv_out(snake_case__)
return sample
class lowerCamelCase ( nn.Module ):
def __init__( self , lowercase__=3 , lowercase__=3 , lowercase__=("UpDecoderBlock2D",) , lowercase__=(6_4,) , lowercase__=2 , lowercase__=3_2 , lowercase__="silu" , lowercase__="group" , ):
super().__init__()
__UpperCAmelCase : Dict = layers_per_block
__UpperCAmelCase : List[Any] = nn.Convad(
snake_case__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCAmelCase : int = None
__UpperCAmelCase : Dict = nn.ModuleList([])
__UpperCAmelCase : List[str] = in_channels if norm_type == "spatial" else None
# mid
__UpperCAmelCase : List[str] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# up
__UpperCAmelCase : Optional[Any] = list(reversed(snake_case__))
__UpperCAmelCase : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(snake_case__):
__UpperCAmelCase : List[Any] = output_channel
__UpperCAmelCase : Any = reversed_block_out_channels[i]
__UpperCAmelCase : Dict = i == len(snake_case__) - 1
__UpperCAmelCase : Any = get_up_block(
snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , )
self.up_blocks.append(snake_case__)
__UpperCAmelCase : Any = output_channel
# out
if norm_type == "spatial":
__UpperCAmelCase : List[str] = SpatialNorm(block_out_channels[0] , snake_case__)
else:
__UpperCAmelCase : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1e-6)
__UpperCAmelCase : List[Any] = nn.SiLU()
__UpperCAmelCase : List[Any] = nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1)
__UpperCAmelCase : Optional[int] = False
def A( self , lowercase__ , lowercase__=None):
__UpperCAmelCase : Tuple = z
__UpperCAmelCase : Tuple = self.conv_in(snake_case__)
__UpperCAmelCase : Any = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase__):
def custom_forward(*lowercase__):
return module(*snake_case__)
return custom_forward
if is_torch_version('''>=''' , '''1.11.0'''):
# middle
__UpperCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , snake_case__ , snake_case__ , use_reentrant=snake_case__)
__UpperCAmelCase : Tuple = sample.to(snake_case__)
# up
for up_block in self.up_blocks:
__UpperCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case__) , snake_case__ , snake_case__ , use_reentrant=snake_case__)
else:
# middle
__UpperCAmelCase : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , snake_case__ , snake_case__)
__UpperCAmelCase : Any = sample.to(snake_case__)
# up
for up_block in self.up_blocks:
__UpperCAmelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__) , snake_case__ , snake_case__)
else:
# middle
__UpperCAmelCase : str = self.mid_block(snake_case__ , snake_case__)
__UpperCAmelCase : Tuple = sample.to(snake_case__)
# up
for up_block in self.up_blocks:
__UpperCAmelCase : int = up_block(snake_case__ , snake_case__)
# post-process
if latent_embeds is None:
__UpperCAmelCase : List[str] = self.conv_norm_out(snake_case__)
else:
__UpperCAmelCase : Tuple = self.conv_norm_out(snake_case__ , snake_case__)
__UpperCAmelCase : Optional[Any] = self.conv_act(snake_case__)
__UpperCAmelCase : str = self.conv_out(snake_case__)
return sample
class lowerCamelCase ( nn.Module ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__="random" , lowercase__=False , lowercase__=True):
super().__init__()
__UpperCAmelCase : List[Any] = n_e
__UpperCAmelCase : Dict = vq_embed_dim
__UpperCAmelCase : int = beta
__UpperCAmelCase : Union[str, Any] = legacy
__UpperCAmelCase : str = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
__UpperCAmelCase : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap)))
__UpperCAmelCase : Optional[Any] = self.used.shape[0]
__UpperCAmelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__UpperCAmelCase : Dict = self.re_embed
__UpperCAmelCase : str = self.re_embed + 1
print(
F"Remapping {self.n_e} indices to {self.re_embed} indices. "
F"Using {self.unknown_index} for unknown indices.")
else:
__UpperCAmelCase : Tuple = n_e
__UpperCAmelCase : Optional[int] = sane_index_shape
def A( self , lowercase__):
__UpperCAmelCase : str = inds.shape
assert len(snake_case__) > 1
__UpperCAmelCase : Union[str, Any] = inds.reshape(ishape[0] , -1)
__UpperCAmelCase : str = self.used.to(snake_case__)
__UpperCAmelCase : str = (inds[:, :, None] == used[None, None, ...]).long()
__UpperCAmelCase : Any = match.argmax(-1)
__UpperCAmelCase : int = match.sum(2) < 1
if self.unknown_index == "random":
__UpperCAmelCase : int = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
__UpperCAmelCase : Optional[int] = self.unknown_index
return new.reshape(snake_case__)
def A( self , lowercase__):
__UpperCAmelCase : Dict = inds.shape
assert len(snake_case__) > 1
__UpperCAmelCase : str = inds.reshape(ishape[0] , -1)
__UpperCAmelCase : Dict = self.used.to(snake_case__)
if self.re_embed > self.used.shape[0]: # extra token
__UpperCAmelCase : Dict = 0 # simply set to zero
__UpperCAmelCase : Dict = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__)
return back.reshape(snake_case__)
def A( self , lowercase__):
__UpperCAmelCase : List[str] = z.permute(0 , 2 , 3 , 1).contiguous()
__UpperCAmelCase : Any = z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2*e*z
__UpperCAmelCase : Optional[Any] = torch.argmin(torch.cdist(snake_case__ , self.embedding.weight) , dim=1)
__UpperCAmelCase : Optional[Any] = self.embedding(snake_case__).view(z.shape)
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : str = None
# compute loss for embedding
if not self.legacy:
__UpperCAmelCase : int = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
__UpperCAmelCase : List[Any] = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
__UpperCAmelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
__UpperCAmelCase : str = z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
__UpperCAmelCase : Optional[int] = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
__UpperCAmelCase : Union[str, Any] = self.remap_to_used(snake_case__)
__UpperCAmelCase : Optional[Any] = min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
__UpperCAmelCase : Optional[int] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def A( self , lowercase__ , lowercase__):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__UpperCAmelCase : List[str] = indices.reshape(shape[0] , -1) # add batch axis
__UpperCAmelCase : Any = self.unmap_to_all(snake_case__)
__UpperCAmelCase : Tuple = indices.reshape(-1) # flatten again
# get quantized latent vectors
__UpperCAmelCase : str = self.embedding(snake_case__)
if shape is not None:
__UpperCAmelCase : Optional[Any] = z_q.view(snake_case__)
# reshape back to match original input shape
__UpperCAmelCase : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
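# Aside: the quantizer's "preserve gradients" line above implements the
# straight-through estimator -- the forward value equals z_q, but the backward
# pass treats the expression as the identity on z. A minimal, self-contained
# sketch of that trick (shapes and names here are illustrative only):
import torch

def _straight_through_demo():
    z = torch.randn(4, 8, requires_grad=True)
    codebook = torch.randn(16, 8)
    idx = torch.cdist(z, codebook).argmin(dim=1)  # non-differentiable lookup
    z_q = codebook[idx]
    z_q = z + (z_q - z).detach()  # value of z_q, gradient of the identity
    z_q.sum().backward()
    assert torch.allclose(z.grad, torch.ones_like(z))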
class lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self , lowercase__ , lowercase__=False):
__UpperCAmelCase : str = parameters
__UpperCAmelCase : int = torch.chunk(snake_case__ , 2 , dim=1)
__UpperCAmelCase : int = torch.clamp(self.logvar , -3_0.0 , 2_0.0)
__UpperCAmelCase : str = deterministic
__UpperCAmelCase : Optional[Any] = torch.exp(0.5 * self.logvar)
__UpperCAmelCase : Any = torch.exp(self.logvar)
if self.deterministic:
__UpperCAmelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def A( self , lowercase__ = None):
__UpperCAmelCase : str = randn_tensor(
self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype)
__UpperCAmelCase : Union[str, Any] = self.mean + self.std * sample
return x
def A( self , lowercase__=None):
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def A( self , lowercase__ , lowercase__=[1, 2, 3]):
if self.deterministic:
return torch.Tensor([0.0])
__UpperCAmelCase : Tuple = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=snake_case__)
def A( self):
return self.mean
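# Aside: the kl() method above is the closed form for diagonal Gaussians,
#   KL(N(mu, var) || N(0, I)) = 0.5 * sum(mu^2 + var - 1 - log var),
# summed over the non-batch axes. A small sanity check against
# torch.distributions (illustrative shapes, not tied to this class):
import torch
from torch.distributions import Normal, kl_divergence

def _kl_closed_form_demo():
    mean = torch.randn(2, 3)
    logvar = torch.randn(2, 3)
    var = logvar.exp()
    closed = 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar, dim=1)
    reference = kl_divergence(
        Normal(mean, var.sqrt()), Normal(torch.zeros_like(mean), torch.ones_like(mean))
    ).sum(dim=1)
    assert torch.allclose(closed, reference, atol=1e-5)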
| 719 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values, or all of the numbers being negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
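# Aside: because rows and columns are both sorted in decreasing order, the
# negatives of each row form a suffix whose start column never moves right as
# we go down the rows. A self-contained sketch of that "staircase" count
# (kept independent of the obfuscated helpers above):
def _staircase_count_demo() -> int:
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    total = 0
    col = len(sample[0])  # boundary between non-negatives and negatives
    for row in sample:
        while col > 0 and row[col - 1] < 0:
            col -= 1
        total += len(row) - col
    return total  # 1 + 1 + 2 + 4 == 8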
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase ( __lowerCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = CpmAntTokenizer
_lowerCAmelCase : List[str] = False
def A( self):
super().setUp()
__UpperCAmelCase : str = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
@tooslow
def A( self):
__UpperCAmelCase : Union[str, Any] = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''')
__UpperCAmelCase : int = '''今天天气真好!'''
__UpperCAmelCase : Union[str, Any] = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
__UpperCAmelCase : Dict = '''今天天气真好!'''
__UpperCAmelCase : List[Any] = [tokenizer.bos_token] + tokens
__UpperCAmelCase : Dict = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_)
__UpperCAmelCase : Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
| 720 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowerCAmelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowerCAmelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence'''),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence''') , id='''references'''),
}) , )
def A( self , lowercase__ , lowercase__ , lowercase__ = 1 , lowercase__ = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case_ , hypotheses=snake_case_ , min_len=snake_case_ , max_len=snake_case_)
}
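# Aside: for a single sentence pair, GLEU reduces to the clipped n-gram
# overlap divided by the hypothesis total (precision) and by the reference
# total (recall), returning the minimum of the two. A from-scratch sketch,
# independent of nltk (note that corpus GLEU aggregates the raw counts across
# sentences rather than averaging per-sentence scores):
from collections import Counter

def _sentence_gleu_demo(hyp, ref, min_len=1, max_len=4):
    def ngrams(tokens):
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngrams(hyp), ngrams(ref)
    overlap = sum((hyp_counts & ref_counts).values())  # clipped matches
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)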
| 721 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extend to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
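# Note: the check above is the standard KV-cache consistency test -- feeding
# the full sequence at once and feeding only the new token together with
# `past_key_values` must yield the same hidden state for that position, up to
# small numerical noise (hence the 1e-3 tolerance).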
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and attention_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token (the 50256 value applies to GPT-2, not BioGPT)
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
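# Note: decoder-only models must be left-padded for batched generation;
# right padding would place pad tokens between the prompt and the newly
# generated tokens and corrupt the continuations.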
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCAmelCase : int = model_type_to_module_name(lowercase_ )
__UpperCAmelCase : int = importlib.import_module(f".{module_name}" , '''transformers.models''' )
try:
return getattr(lowercase_ , lowercase_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(lowercase_ , '''__name__''' , lowercase_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCAmelCase : Any = importlib.import_module('''transformers''' )
if hasattr(lowercase_ , lowercase_ ):
return getattr(lowercase_ , lowercase_ )
return None
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , **lowercase_ , ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = get_file_from_repo(
lowercase_ , lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , resume_download=lowercase_ , proxies=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , local_files_only=lowercase_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(lowercase_ , encoding='''utf-8''' ) as reader:
return json.load(lowercase_ )
class __A :
def __init__( self):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase__)
def A( cls , lowercase__ , **lowercase__):
__UpperCAmelCase : str = kwargs.pop('''config''' , UpperCAmelCase__)
__UpperCAmelCase : List[Any] = kwargs.pop('''trust_remote_code''' , UpperCAmelCase__)
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = FeatureExtractionMixin.get_feature_extractor_dict(UpperCAmelCase__ , **UpperCAmelCase__)
__UpperCAmelCase : Optional[Any] = config_dict.get('''feature_extractor_type''' , UpperCAmelCase__)
__UpperCAmelCase : Optional[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
__UpperCAmelCase : int = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__):
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
# It could be in `config.feature_extractor_type`
__UpperCAmelCase : int = getattr(UpperCAmelCase__ , '''feature_extractor_type''' , UpperCAmelCase__)
if hasattr(UpperCAmelCase__ , '''auto_map''') and "AutoFeatureExtractor" in config.auto_map:
__UpperCAmelCase : int = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
__UpperCAmelCase : int = feature_extractor_class_from_name(UpperCAmelCase__)
__UpperCAmelCase : List[str] = feature_extractor_auto_map is not None
__UpperCAmelCase : List[Any] = feature_extractor_class is not None or type(UpperCAmelCase__) in FEATURE_EXTRACTOR_MAPPING
__UpperCAmelCase : Any = resolve_trust_remote_code(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
if has_remote_code and trust_remote_code:
__UpperCAmelCase : Any = get_class_from_dynamic_module(
UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__)
__UpperCAmelCase : Union[str, Any] = kwargs.pop('''code_revision''' , UpperCAmelCase__)
if os.path.isdir(UpperCAmelCase__):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(UpperCAmelCase__) in FEATURE_EXTRACTOR_MAPPING:
__UpperCAmelCase : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(UpperCAmelCase__)]
return feature_extractor_class.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
@staticmethod
def A( lowercase__ , lowercase__):
FEATURE_EXTRACTOR_MAPPING.register(UpperCAmelCase__ , UpperCAmelCase__)
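# Aside: a typical resolution flow, sketched as a usage example (the
# checkpoint name is only an illustration and fetching it needs network
# access):
#
#     from transformers import AutoFeatureExtractor
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     # -> reads preprocessor_config.json, finds "Wav2Vec2FeatureExtractor",
#     #    and instantiates that concrete class via the mapping above.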
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
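# Aside: the `inputs` property above is what ultimately populates the
# `dynamic_axes` argument of torch.onnx.export. A sketch of the
# correspondence (model and tensors are placeholders, not a full export
# recipe):
#
#     dynamic_axes = {
#         "input_ids": {0: "batch", 1: "sequence"},
#         "attention_mask": {0: "batch", 1: "sequence"},
#         "token_type_ids": {0: "batch", 1: "sequence"},
#     }
#     torch.onnx.export(
#         model,
#         (input_ids, attention_mask, token_type_ids),
#         "bert.onnx",
#         input_names=list(dynamic_axes),
#         dynamic_axes=dynamic_axes,
#     )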
| 675 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCAmelCase = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
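# Aside: _LazyModule defers the heavy framework imports until an attribute is
# first accessed. The core mechanism is attribute-lookup interception; a
# minimal standalone sketch of the idea (simplified, not the real class):
import importlib

class _LazyNamespaceDemo:
    def __init__(self, import_structure):
        # maps each exported attribute name back to the module providing it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        module_name = self._attr_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value

# e.g. _LazyNamespaceDemo({"json": ["dumps"]}).dumps({"a": 1}) -> '{"a": 1}'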
| 701 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )  # tf.sub was renamed to tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.global_variables_initializer()  # initialize_all_variables() is long deprecated
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
# 'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
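# The TF1 graph above interleaves the E-M steps with session plumbing; as a
# reference, here is a minimal NumPy sketch of the same loop. Every name below
# is illustrative and does not come from the mangled code above.
import numpy as np
def kmeans_numpy_sketch(vectors, noofclusters, noofiterations=100):
    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    # Initialize centroids from randomly chosen data points, as above
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)]
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(noofiterations):
        # Expectation: assign each vector to its nearest centroid
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its assigned vectors
        for cluster_n in range(noofclusters):
            members = vectors[assignments == cluster_n]
            if len(members):
                centroids[cluster_n] = members.mean(axis=0)
    return centroids, assignments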
| 675 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase__ ):
_lowerCAmelCase : List[str] = ['''input_features''', '''is_longer''']
def __init__( self , lowercase__=6_4 , lowercase__=4_8_0_0_0 , lowercase__=4_8_0 , lowercase__=1_0 , lowercase__=1_0_2_4 , lowercase__=0.0 , lowercase__=False , lowercase__ = 0 , lowercase__ = 1_4_0_0_0 , lowercase__ = None , lowercase__ = "fusion" , lowercase__ = "repeatpad" , **lowercase__ , ):
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : List[Any] = top_db
__UpperCAmelCase : Tuple = truncation
__UpperCAmelCase : str = padding
__UpperCAmelCase : Dict = fft_window_size
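        # an rFFT of length fft_window_size yields fft_window_size // 2 + 1 frequency bins: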
__UpperCAmelCase : str = (fft_window_size >> 1) + 1
__UpperCAmelCase : Tuple = hop_length
__UpperCAmelCase : Optional[Any] = max_length_s
__UpperCAmelCase : List[Any] = max_length_s * sampling_rate
__UpperCAmelCase : List[Any] = sampling_rate
__UpperCAmelCase : Dict = frequency_min
__UpperCAmelCase : List[str] = frequency_max
__UpperCAmelCase : List[str] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCamelCase , min_frequency=__lowerCamelCase , max_frequency=__lowerCamelCase , sampling_rate=__lowerCamelCase , norm=__lowerCamelCase , mel_scale='''htk''' , )
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCamelCase , min_frequency=__lowerCamelCase , max_frequency=__lowerCamelCase , sampling_rate=__lowerCamelCase , norm='''slaney''' , mel_scale='''slaney''' , )
def A( self):
__UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__)
__UpperCAmelCase : Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Optional[Any] = spectrogram(
__lowerCamelCase , window_function(self.fft_window_size , '''hann''') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__lowerCamelCase , log_mel='''dB''' , )
return log_mel_spectrogram.T
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : Dict = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : Optional[Any] = [0]
# randomly choose index for each part
__UpperCAmelCase : int = np.random.choice(ranges[0])
__UpperCAmelCase : Optional[Any] = np.random.choice(ranges[1])
__UpperCAmelCase : Tuple = np.random.choice(ranges[2])
__UpperCAmelCase : Union[str, Any] = mel[idx_front : idx_front + chunk_frames, :]
__UpperCAmelCase : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCAmelCase : Any = mel[idx_back : idx_back + chunk_frames, :]
__UpperCAmelCase : Union[str, Any] = torch.tensor(mel[None, None, :])
__UpperCAmelCase : Optional[int] = torch.nn.functional.interpolate(
__lowerCamelCase , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=__lowerCamelCase)
__UpperCAmelCase : str = mel_shrink[0][0].numpy()
__UpperCAmelCase : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
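    # Illustration with assumed numbers (not from the code): if total_frames=10
    # and chunk_frames=4 there are 7 valid start indices (0..6), and the split
    # above yields thirds [0, 1, 2], [3, 4], [5, 6]; one random start is drawn
    # from each third, and the bilinearly shrunken full mel plus the three
    # chunks are stacked into a 4-channel "fusion" input.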
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCAmelCase : Union[str, Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCAmelCase : Union[str, Any] = len(__lowerCamelCase) - max_length
__UpperCAmelCase : List[str] = np.random.randint(0 , overflow + 1)
__UpperCAmelCase : Any = waveform[idx : idx + max_length]
__UpperCAmelCase : List[str] = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
__UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters)
                __UpperCAmelCase : Dict = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
__UpperCAmelCase : Dict = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCAmelCase : str = np.stack([mel, mel, mel, mel] , axis=0)
__UpperCAmelCase : int = False
else:
__UpperCAmelCase : List[str] = self._random_mel_fusion(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
__UpperCAmelCase : Optional[Any] = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented")
else:
__UpperCAmelCase : str = False
        # "repeat" is the only new possible padding value: the audio is tiled before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCAmelCase : str = int(max_length / len(__lowerCamelCase))
__UpperCAmelCase : int = np.stack(np.tile(__lowerCamelCase , n_repeat + 1))[:max_length]
if padding == "repeatpad":
__UpperCAmelCase : Any = int(max_length / len(__lowerCamelCase))
__UpperCAmelCase : Any = np.stack(np.tile(__lowerCamelCase , __lowerCamelCase))
__UpperCAmelCase : List[Any] = np.pad(__lowerCamelCase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0)
if truncation == "fusion":
__UpperCAmelCase : Tuple = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters)
__UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
__UpperCAmelCase : Any = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters_slaney)[None, :]
return input_mel, longer
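    # Illustration with assumed numbers (not from the code): for a 3-sample
    # waveform [a, b, c] and max_length=8, "repeat" tiles to
    # [a, b, c, a, b, c, a, b], while "repeatpad" tiles int(8 / 3) = 2 times to
    # [a, b, c, a, b, c] and zero-pads the remaining 2 samples.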
def __call__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
__UpperCAmelCase : int = truncation if truncation is not None else self.truncation
__UpperCAmelCase : Union[str, Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
__UpperCAmelCase : int = isinstance(__lowerCamelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__UpperCAmelCase : Optional[int] = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__UpperCAmelCase : Optional[int] = [np.asarray(__lowerCamelCase , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray):
__UpperCAmelCase : int = np.asarray(__lowerCamelCase , dtype=np.floataa)
elif isinstance(__lowerCamelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__UpperCAmelCase : Tuple = [np.asarray(__lowerCamelCase)]
# convert to mel spectrogram, truncate and pad if needed.
__UpperCAmelCase : str = [
self._get_input_mel(__lowerCamelCase , max_length if max_length else self.nb_max_samples , __lowerCamelCase , __lowerCamelCase)
for waveform in raw_speech
]
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[int] = []
for mel, longer in padded_inputs:
input_mel.append(__lowerCamelCase)
is_longer.append(__lowerCamelCase)
if truncation == "fusion" and sum(__lowerCamelCase) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__UpperCAmelCase : Any = np.random.randint(0 , len(__lowerCamelCase))
__UpperCAmelCase : Union[str, Any] = True
if isinstance(input_mel[0] , __lowerCamelCase):
__UpperCAmelCase : Any = [np.asarray(__lowerCamelCase , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
__UpperCAmelCase : Any = [[longer] for longer in is_longer]
__UpperCAmelCase : Optional[int] = {'''input_features''': input_mel, '''is_longer''': is_longer}
__UpperCAmelCase : Dict = BatchFeature(__lowerCamelCase)
if return_tensors is not None:
__UpperCAmelCase : Any = input_features.convert_to_tensors(__lowerCamelCase)
return input_features
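# A hedged usage sketch: this class mirrors transformers' ClapFeatureExtractor,
# and the checkpoint name is taken from the pipeline tests further down; the
# 12-second random waveform is an assumption chosen to trigger the fusion path.
import numpy as np
from transformers import ClapFeatureExtractor
extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.random.randn(48_000 * 12).astype(np.float32)  # ~12 s at 48 kHz
features = extractor(waveform, sampling_rate=48_000, return_tensors="pt")
print(features["input_features"].shape, features["is_longer"])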
| 702 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
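# A de-mangled sketch of the recurrence above (names are illustrative, not the
# original identifiers): the best sum including the current number is the
# previous best excluding it plus that number; the best excluding it is the
# running maximum of the two previous states.
def max_non_adjacent_sum(nums):
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
# e.g. [2, 7, 9, 3, 1]: picking 2 + 9 + 1 = 12 beats any adjacent combination
assert max_non_adjacent_sum([2, 7, 9, 3, 1]) == 12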
| 675 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"vocab_file": "spiece.model"}
lowerCAmelCase = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCamelCase ( UpperCAmelCase_ ):
def __init__( self , lowercase__ , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<unk>" , lowercase__="<sep>" , lowercase__="<pad>" , lowercase__="<cls>" , lowercase__="<mask>" , lowercase__=["<eop>", "<eod>"] , lowercase__ = None , **lowercase__ , ):
__UpperCAmelCase : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
__UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Any = do_lower_case
__UpperCAmelCase : Any = remove_space
__UpperCAmelCase : Union[str, Any] = keep_accents
__UpperCAmelCase : Optional[int] = vocab_file
__UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''')
__UpperCAmelCase : Dict = jieba
__UpperCAmelCase : List[str] = str.maketrans(''' \n''' , '''\u2582\u2583''')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def A( self):
return len(self.sp_model)
def A( self):
__UpperCAmelCase : Dict = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
__UpperCAmelCase : Tuple = self.__dict__.copy()
__UpperCAmelCase : str = None
return state
def __setstate__( self , lowercase__):
__UpperCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A( self , lowercase__):
if self.remove_space:
__UpperCAmelCase : Tuple = ''' '''.join(inputs.strip().split())
else:
__UpperCAmelCase : Dict = inputs
__UpperCAmelCase : Dict = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
__UpperCAmelCase : str = unicodedata.normalize('''NFKD''' , _lowercase)
__UpperCAmelCase : List[Any] = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
__UpperCAmelCase : List[str] = outputs.lower()
return outputs
def A( self , lowercase__):
__UpperCAmelCase : Dict = self.preprocess_text(_lowercase)
__UpperCAmelCase : int = self.sp_model.encode(_lowercase , out_type=_lowercase)
__UpperCAmelCase : Union[str, Any] = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
__UpperCAmelCase : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__UpperCAmelCase : str = cur_pieces[1:]
else:
__UpperCAmelCase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def A( self , lowercase__):
return self.sp_model.PieceToId(_lowercase)
def A( self , lowercase__):
return self.sp_model.IdToPiece(_lowercase)
def A( self , lowercase__):
__UpperCAmelCase : List[Any] = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A( self , lowercase__ , lowercase__ = None , lowercase__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def A( self , lowercase__ , lowercase__ = None):
__UpperCAmelCase : str = [self.sep_token_id]
__UpperCAmelCase : Dict = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def A( self , lowercase__ , lowercase__ = None):
if not os.path.isdir(_lowercase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : Optional[int] = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
__UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
def A( self , *lowercase__ , **lowercase__):
__UpperCAmelCase : int = super()._decode(*_lowercase , **_lowercase)
__UpperCAmelCase : Optional[int] = text.replace(''' ''' , '''''').replace('''\u2582''' , ''' ''').replace('''\u2583''' , '''\n''')
return text
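# A minimal standalone sketch of the space/newline round-trip this tokenizer
# relies on: ' ' and '\n' are mapped to U+2582/U+2583 before SentencePiece (via
# the translate table built in __init__), and _decode above maps them back.
# The sample text is illustrative.
translator = str.maketrans(" \n", "\u2582\u2583")
text = "你好 世界\n再见"
encoded = text.translate(translator)  # spaces/newlines become \u2582 / \u2583
decoded = encoded.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == text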
| 703 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
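# A standalone sketch of the same call outside the test harness; the model id,
# dataset, and candidate labels are taken verbatim from the slow test above.
from datasets import load_dataset
from transformers import pipeline
audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
dataset = load_dataset("ashraq/esc50")
audio = dataset["train"]["audio"][-1]["array"]
print(audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))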
| 675 | 0 |