[Dataset schema, flattened from the original viewer table: each row holds a `code` string (length 86 to 54.5k), an integer `code_codestyle` id (0-371), a `style_context` string (length 87 to 49.2k), an integer `style_context_codestyle` id (0-349), and a binary `label` (0 or 1). The samples below are separated by their metadata values in brackets.]
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return the current weather for a location."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the weather forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return a "one call" weather report for the given coordinates."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location: ").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
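# The ``params=locals()`` trick above forwards each function's arguments as
# query parameters, which is why the parameter names (q, appid, lat, lon)
# must match the OpenWeatherMap API. For example, current_weather("Paris")
# issues the equivalent of:
#
#   requests.get(URL_BASE + "weather", params={"q": "Paris", "appid": APPID})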
[code_codestyle: 283]
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build a feathered alpha mask whose borders ramp from 255 down to 0."""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
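# Sketch of the mask above: for size=(64, 64) and overlap_pixels=8, the
# 48 x 48 core stays at 255 while np.pad(..., mode="linear_ramp",
# end_values=0) feathers the 8-pixel border down to 0, so pasting with this
# mask cross-fades neighbouring tiles instead of leaving visible seams.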
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_coords, max_coords):
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    # Round n down to the nearest multiple of d.
    divisor = n % d
    return n - divisor
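# Example: next_divisible(300, 128) == 256, since 300 % 128 == 44; i.e. n is
# rounded down to the nearest multiple of d. (The helper is not referenced
# elsewhere in this snippet but is kept as-is.)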
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
[style_context_codestyle: 283 | label: 1]
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str) -> None:
    """Update the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
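# For reference, the fragment of custom.js this script rewrites looks roughly
# like the following (assumed shape, not copied from the repo):
#
#   const stableVersion = "v4.21.0"
#   const versionMapping = {
#       "main": "main",
#       "v4.21.0": "v4.21.0",
#   }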
[code_codestyle: 98]
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
[style_context_codestyle: 98 | label: 1]
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _UpperCAmelCase ( lowerCAmelCase__):
def _snake_case ( self : str ):
snake_case_ : List[str] = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Optional[int] ):
with self.assertRaises(lowercase_ ):
snake_case_ : int = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Union[str, Any] ):
with self.assertRaises(lowercase_ ):
snake_case_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Dict ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ : Union[str, Any] = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def _snake_case ( self : int ):
snake_case_ : Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Dict ):
snake_case_ : Tuple = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : Dict ):
snake_case_ : Union[str, Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def _snake_case ( self : List[Any] ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
snake_case_ : Dict = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) )
def _snake_case ( self : Dict ):
snake_case_ : Dict = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : int = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ):
import PIL.Image
snake_case_ : List[str] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=lowercase_ ) as mock_cast_to_python_objects:
snake_case_ : Union[str, Any] = pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image() ) )
snake_case_, snake_case_ : Tuple = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , lowercase_ )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def __lowercase ( _a , _a ):
snake_case_ : List[Any] = pa.BufferReader(_a ) if isinstance(_a , pa.Buffer ) else pa.memory_map(_a )
snake_case_ : int = pa.ipc.open_stream(_a )
snake_case_ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __lowercase ( _a , _a ):
snake_case_ : Optional[Any] = pa.BufferOutputStream()
snake_case_ : Optional[Any] = pa.schema(_a ) if fields else None
with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
snake_case_, snake_case_ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : Dict = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowercase ( ):
snake_case_ : Tuple = pa.BufferOutputStream()
snake_case_ : Optional[Any] = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=_a , features=_a ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
snake_case_, snake_case_ : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case_ : str = pa.BufferReader(output.getvalue() )
snake_case_ : str = pa.ipc.open_stream(_a )
snake_case_ : pa.Table = f.read_all()
snake_case_ : Dict = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_a )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def __lowercase ( _a ):
snake_case_ : Any = pa.BufferOutputStream()
with ArrowWriter(
stream=_a , writer_batch_size=_a , hash_salt='''split_name''' , check_duplicates=_a , ) as writer:
with pytest.raises(_a ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
snake_case_, snake_case_ : List[str] = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def __lowercase ( _a ):
snake_case_ : List[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=_a , writer_batch_size=_a , hash_salt='''split_name''' , check_duplicates=_a , ) as writer:
with pytest.raises(_a ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
snake_case_, snake_case_ : Dict = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def __lowercase ( _a ):
snake_case_ : List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=_a , writer_batch_size=_a , hash_salt='''split_name''' , check_duplicates=_a , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
snake_case_, snake_case_ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __lowercase ( _a , _a ):
snake_case_ : List[Any] = pa.BufferOutputStream()
snake_case_ : str = pa.schema(_a ) if fields else None
with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
snake_case_, snake_case_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __lowercase ( _a , _a ):
snake_case_ : Any = pa.BufferOutputStream()
snake_case_ : List[str] = pa.schema(_a ) if fields else None
with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
snake_case_, snake_case_ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : Dict = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def __lowercase ( _a , _a ):
snake_case_ : Optional[int] = pa.BufferOutputStream()
snake_case_ : Dict = pa.schema(_a ) if fields else None
with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
snake_case_, snake_case_ : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowercase ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()}
snake_case_ : Any = os.path.join(_a , '''test.arrow''' )
with ArrowWriter(path=_a , schema=pa.schema(_a ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
snake_case_, snake_case_ : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
_check_output(_a , 1 )
def __lowercase ( _a ):
if pa.types.is_list(_a ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowercase ( _a , _a ):
if isinstance(lst[0] , _a ):
change_first_primitive_element_in_list(lst[0] , _a )
else:
snake_case_ : Any = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowercase ( _a , _a , _a ):
snake_case_ : List[Any] = pa.array(TypedSequence(_a , optimized_int_type=_a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowercase ( _a , _a , _a ):
# in range
snake_case_ : int = pa.array(OptimizedTypedSequence(_a , col=_a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case_ : int = copy.deepcopy(_a )
snake_case_ : str = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_a , _a )
snake_case_ : Dict = pa.array(OptimizedTypedSequence(_a , col=_a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def __lowercase ( _a , _a ):
snake_case_ : Tuple = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=_a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowercase ( _a ):
snake_case_ : Any = '''mock://dataset-train.arrow'''
with ArrowWriter(path=_a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
snake_case_, snake_case_ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_a )
def __lowercase ( ):
snake_case_ : List[Any] = pa.BufferOutputStream()
with ParquetWriter(stream=_a ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
snake_case_, snake_case_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case_ : int = pa.BufferReader(output.getvalue() )
snake_case_ : pa.Table = pq.read_table(_a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def __lowercase ( _a , _a ):
import PIL.Image
snake_case_ : Optional[Any] = str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_a , format='''png''' )
snake_case_ : Tuple = pa.BufferOutputStream()
with ParquetWriter(
stream=_a , features=Features({'''image''': Image()} ) , embed_local_files=_a ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
snake_case_ : Any = pa.BufferReader(output.getvalue() )
snake_case_ : pa.Table = pq.read_table(_a )
snake_case_ : Union[str, Any] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , _a )
with open(_a , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowercase ( ):
snake_case_ : Union[str, Any] = pa.schema([pa.field('''col_1''' , pa.string() , nullable=_a )] )
snake_case_ : Tuple = pa.BufferOutputStream()
with ArrowWriter(stream=_a ) as writer:
writer._build_writer(inferred_schema=_a )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
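# TypedSequence in a nutshell, as exercised by the tests above: passing
# ``type=`` to pa.array(TypedSequence(...)) casts strictly and raises on
# incompatible data, while ``try_type=`` falls back to the inferred Arrow type
# (e.g. pa.string() for ["foo", "bar"]) when the cast fails.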
[code_codestyle: 264]
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
def __init__( self : List[Any] ):
snake_case_ : List[str] = ''''''
snake_case_ : Tuple = ''''''
snake_case_ : int = []
snake_case_ : Optional[int] = 0
snake_case_ : Optional[Any] = 256
snake_case_ : Tuple = 0
snake_case_ : Tuple = 0
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
def _snake_case ( self : Optional[Any] , lowercase_ : List[Any] ):
snake_case_ : List[Any] = cva.imread(lowercase_ , 0 )
snake_case_ : Tuple = copy.deepcopy(self.img )
snake_case_, snake_case_, snake_case_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
snake_case_ : str = np.sum(lowercase_ )
for i in range(len(lowercase_ ) ):
snake_case_ : Optional[Any] = x[i] / self.k
self.sk += prk
snake_case_ : Any = (self.L - 1) * self.sk
if self.rem != 0:
snake_case_ : Dict = int(last % last )
snake_case_ : Union[str, Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowercase_ )
snake_case_ : int = int(np.ma.count(self.img ) / self.img[1].size )
snake_case_ : Tuple = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case_ : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
snake_case_ : List[str] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def _snake_case ( self : Tuple ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _snake_case ( self : int ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowercase__ : Any = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowercase__ : Any = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
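# The mapping built in stretch() follows standard histogram equalization:
# with p(r_k) = n_k / N the normalized histogram of the input, each gray
# level r_k is remapped to s_k = (L - 1) * sum_{j=0..k} p(r_j), rounded to
# the nearest integer (self.sk accumulates the cumulative probability).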
[style_context_codestyle: 264 | label: 1]
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
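# Minimal usage sketch with the functions defined above (run_workload is a
# hypothetical stand-in for the code being measured):
#
#   start = start_measure()
#   run_workload()
#   measures = end_measure(start)
#   log_measures(measures, "workload")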
[code_codestyle: 281]
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
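# A short usage sketch (assumed API, mirroring other HF vision configs):
#
#   config = Data2VecVisionConfig(image_size=224, patch_size=16)
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])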
[style_context_codestyle: 281 | label: 1]
"""simple docstring"""
import re
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
try:
__SCREAMING_SNAKE_CASE = split_input(lowerCAmelCase_ )
if upper:
__SCREAMING_SNAKE_CASE = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__SCREAMING_SNAKE_CASE = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return to_simple_case(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
try:
__SCREAMING_SNAKE_CASE = to_simple_case(lowerCAmelCase_ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return to_complex_case(lowerCAmelCase_ , lowerCAmelCase_ , "_" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return to_complex_case(lowerCAmelCase_ , lowerCAmelCase_ , "-" )
if __name__ == "__main__":
__import__('''doctest''').testmod()
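# Example behaviour of the helpers above:
#
#   to_pascal_case("hello world")        -> 'HelloWorld'
#   to_camel_case("hello world")         -> 'helloWorld'
#   to_snake_case("hello world", True)   -> 'HELLO_WORLD'
#   to_kebab_case("hello world", False)  -> 'hello-world'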
[code_codestyle: 54]
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
[style_context_codestyle: 114 | label: 0]
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
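# The tuple-count trick: (input_1, input_2).count(0) == 0 holds only when
# neither input is 0, which is exactly the logical AND of two bits.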
[code_codestyle: 251]
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
[style_context_codestyle: 251 | label: 1]
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
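# Example invocation, built from the argparse flags above (the script filename
# and the paths are illustrative placeholders):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned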
[code_codestyle: 149]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
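# With this lazy-import pattern, importing the package is cheap: _LazyModule
# replaces the package object in sys.modules and only imports a submodule
# (e.g. modeling_blip) the first time one of its exported names is accessed.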
[style_context_codestyle: 149 | label: 1]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase :int = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :List[Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :int = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_lowerCAmelCase :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
[code_codestyle: 359]
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 68 | 0 |
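# Quick sanity check for the bitonic sort above. Bitonic sort requires the element
# count to be a power of two; direction 1 sorts ascending, direction 0 descending,
# and a fully sorted run is itself bitonic, so bitonic_merge can reverse it:
data = [12, 42, -21, 17, 23, 18, 9, -23]
bitonic_sort(data, 0, len(data), 1)
assert data == sorted(data)
bitonic_merge(data, 0, len(data), 0)
assert data == sorted(data, reverse=True)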
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 92 |
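# max_sum_in_array above is the classic fixed-size sliding window: instead of
# re-summing every window (O(n*k)), each step drops the element leaving the window
# and adds the one entering it (O(n) overall). Hand-checked example:
values = [1, 4, 2, 10, 23, 3, 1, 0, 20]
assert max_sum_in_array(values, 4) == 39  # best window is [4, 2, 10, 23]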
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a__ :
def __init__( self : str, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Optional[Any]=13, lowerCAmelCase : str=7, lowerCAmelCase : Union[str, Any]=True, lowerCAmelCase : Optional[int]=True, lowerCAmelCase : Dict=True, lowerCAmelCase : List[str]=True, lowerCAmelCase : List[Any]=99, lowerCAmelCase : Tuple=32, lowerCAmelCase : int=2, lowerCAmelCase : Dict=4, lowerCAmelCase : List[str]=37, lowerCAmelCase : Any="gelu", lowerCAmelCase : Optional[int]=0.1, lowerCAmelCase : Tuple=0.1, lowerCAmelCase : Optional[int]=512, lowerCAmelCase : Dict=16, lowerCAmelCase : Tuple=2, lowerCAmelCase : Union[str, Any]=0.02, lowerCAmelCase : str=3, lowerCAmelCase : Any=4, lowerCAmelCase : List[str]=None, lowerCAmelCase : Union[str, Any]=1000, ) -> Dict:
lowercase : Optional[Any] = parent
lowercase : Tuple = batch_size
lowercase : List[Any] = seq_length
lowercase : List[str] = is_training
lowercase : Optional[Any] = use_input_mask
lowercase : Optional[int] = use_token_type_ids
lowercase : List[Any] = use_labels
lowercase : Optional[Any] = vocab_size
lowercase : int = hidden_size
lowercase : Union[str, Any] = num_hidden_layers
lowercase : Dict = num_attention_heads
lowercase : str = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : str = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_vocab_size
lowercase : Optional[int] = type_sequence_label_size
lowercase : str = initializer_range
lowercase : Any = num_labels
lowercase : List[Any] = num_choices
lowercase : Optional[int] = scope
lowercase : str = range_bbox
def lowercase ( self : str ) -> Optional[int]:
lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase : str = bbox[i, j, 3]
lowercase : Tuple = bbox[i, j, 1]
lowercase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase : Optional[int] = bbox[i, j, 2]
lowercase : List[str] = bbox[i, j, 0]
lowercase : Union[str, Any] = t
lowercase : Any = tf.convert_to_tensor(lowerCAmelCase )
lowercase : Optional[Any] = None
if self.use_input_mask:
lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_token_type_ids:
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase : Dict = None
lowercase : List[str] = None
lowercase : List[Any] = None
if self.use_labels:
lowercase : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase : Optional[int] = ids_tensor([self.batch_size], self.num_choices )
lowercase : Tuple = LayoutLMConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : int, lowerCAmelCase : Tuple, lowerCAmelCase : Optional[Any], lowerCAmelCase : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[Any], lowerCAmelCase : Tuple, lowerCAmelCase : List[Any] ) -> Dict:
lowercase : Dict = TFLayoutLMModel(config=lowerCAmelCase )
lowercase : str = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
lowercase : Union[str, Any] = model(lowerCAmelCase, lowerCAmelCase, token_type_ids=lowerCAmelCase )
lowercase : Any = model(lowerCAmelCase, lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def lowercase ( self : Tuple, lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int], lowerCAmelCase : Tuple, lowerCAmelCase : List[str] ) -> Any:
lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=lowerCAmelCase )
lowercase : List[Any] = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Any, lowerCAmelCase : Tuple, lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Dict, lowerCAmelCase : List[str], lowerCAmelCase : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : int ) -> List[str]:
lowercase : Optional[Any] = self.num_labels
lowercase : Optional[int] = TFLayoutLMForSequenceClassification(config=lowerCAmelCase )
lowercase : Tuple = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple, lowerCAmelCase : Union[str, Any], lowerCAmelCase : List[str], lowerCAmelCase : Optional[int], lowerCAmelCase : List[str], lowerCAmelCase : List[str], lowerCAmelCase : int, lowerCAmelCase : Optional[Any], lowerCAmelCase : Dict ) -> Dict:
lowercase : Optional[int] = self.num_labels
lowercase : int = TFLayoutLMForTokenClassification(config=lowerCAmelCase )
lowercase : List[str] = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : Any, lowerCAmelCase : Optional[Any], lowerCAmelCase : List[str], lowerCAmelCase : List[Any], lowerCAmelCase : List[Any], lowerCAmelCase : Any, lowerCAmelCase : str, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Tuple ) -> Optional[Any]:
lowercase : List[str] = TFLayoutLMForQuestionAnswering(config=lowerCAmelCase )
lowercase : Optional[int] = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase ( self : Tuple ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = True
_lowerCamelCase = 10
def lowercase ( self : Tuple ) -> int:
lowercase : int = TFLayoutLMModelTester(self )
lowercase : int = ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 )
def lowercase ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
def lowercase ( self : str ) -> List[Any]:
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase ( self : List[Any] ) -> Tuple:
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def lowercase ( self : int ) -> List[str]:
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def lowercase ( self : Dict ) -> int:
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
def lowercase ( self : List[str] ) -> Any:
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
@slow
def lowercase ( self : Dict ) -> List[Any]:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict = TFLayoutLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def lowercase ( self : List[Any] ) -> List[Any]:
pass
def lowercase__ ( ) -> str:
'''simple docstring'''
lowercase : Any = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
lowercase : List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase : List[str] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
lowercase : Dict = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase : List[Any] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a__ ( unittest.TestCase ):
@slow
def lowercase ( self : Optional[int] ) -> str:
lowercase : Any = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase , lowercase , lowercase , lowercase , lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : List[str] = model(input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
# test the sequence output on [0, :3, :3]
lowercase : Dict = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], lowerCAmelCase, atol=1e-3 ) )
# test the pooled output on [1, :3]
lowercase : Any = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3], lowerCAmelCase, atol=1e-3 ) )
@slow
def lowercase ( self : List[Any] ) -> Any:
# initialize model with randomly initialized sequence classification head
lowercase : List[str] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=2 )
lowercase , lowercase , lowercase , lowercase , lowercase : Any = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : Optional[int] = model(
input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=tf.convert_to_tensor([1, 1] ), )
# test whether we get a loss as a scalar
lowercase : List[str] = outputs.loss
lowercase : List[Any] = (2,)
self.assertEqual(loss.shape, lowerCAmelCase )
# test the shape of the logits
lowercase : str = outputs.logits
lowercase : List[str] = (2, 2)
self.assertEqual(logits.shape, lowerCAmelCase )
@slow
def lowercase ( self : List[Any] ) -> str:
# initialize model with randomly initialized token classification head
lowercase : Tuple = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=13 )
lowercase , lowercase , lowercase , lowercase , lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : List[str] = model(
input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
# test the shape of the logits
lowercase : Union[str, Any] = outputs.logits
lowercase : Union[str, Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape, lowerCAmelCase )
@slow
def lowercase ( self : Union[str, Any] ) -> int:
# initialize model with randomly initialized token classification head
lowercase : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase , lowercase , lowercase , lowercase , lowercase : Any = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : int = model(input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
# test the shape of the logits
lowercase : str = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape, lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape, lowerCAmelCase )
| 255 | 0 |
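# The bbox-legalization loop in the model tester above makes each random box legal by
# swapping coordinates so that x0 <= x1 and y0 <= y1. The same normalization can be
# written without Python loops; a numpy sketch, assuming bbox has shape
# (batch, seq_len, 4) laid out as [x0, y0, x1, y1]:
import numpy as np
def make_bbox_legal(bbox: np.ndarray) -> np.ndarray:
    x = np.sort(bbox[..., [0, 2]], axis=-1)  # per-box min/max of the two x coordinates
    y = np.sort(bbox[..., [1, 3]], axis=-1)  # per-box min/max of the two y coordinates
    return np.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], axis=-1)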
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Optional[int] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
A_ : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = {}
with open(lowerCAmelCase_ , """r""" ) as file:
for line_number, line in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : int = line.strip()
if line:
_UpperCAmelCase : Tuple = line.split()
_UpperCAmelCase : Optional[int] = line_number
_UpperCAmelCase : str = words[0]
_UpperCAmelCase : Optional[Any] = value
return result
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
_UpperCAmelCase : Dict = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_UpperCAmelCase : Union[str, Any] = """param"""
if weight_type is not None and weight_type != "param":
_UpperCAmelCase : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase : Optional[Any] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_UpperCAmelCase : Any = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : List[str] = shape_pointer.shape
# let's reduce dimension
_UpperCAmelCase : str = value[0]
else:
_UpperCAmelCase : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_UpperCAmelCase : List[Any] = value
elif weight_type == "weight_g":
_UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
_UpperCAmelCase : Dict = value
elif weight_type == "bias":
_UpperCAmelCase : int = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_UpperCAmelCase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = value
else:
_UpperCAmelCase : Tuple = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
_UpperCAmelCase : Any = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_UpperCAmelCase : Optional[Any] = """param"""
if weight_type is not None and weight_type != "param":
_UpperCAmelCase : List[str] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase : Dict = """.""".join([key, hf_param_name] )
else:
_UpperCAmelCase : int = key
_UpperCAmelCase : int = value if """lm_head""" in full_key else value[0]
A_ : Union[str, Any] = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None )-> Any:
'''simple docstring'''
_UpperCAmelCase : int = False
for key, mapped_key in MAPPING.items():
_UpperCAmelCase : Tuple = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_UpperCAmelCase : Tuple = True
if "*" in mapped_key:
_UpperCAmelCase : Dict = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2]
_UpperCAmelCase : Dict = mapped_key.replace("""*""" , lowerCAmelCase_ )
if "weight_g" in name:
_UpperCAmelCase : Optional[Any] = """weight_g"""
elif "weight_v" in name:
_UpperCAmelCase : Optional[Any] = """weight_v"""
elif "bias" in name:
_UpperCAmelCase : Dict = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase : List[str] = """weight"""
else:
_UpperCAmelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : Any = []
_UpperCAmelCase : Dict = fairseq_model.state_dict()
_UpperCAmelCase : Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase : int = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
_UpperCAmelCase : int = True
else:
_UpperCAmelCase : Dict = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = full_name.split("""conv_layers.""" )[-1]
_UpperCAmelCase : Any = name.split(""".""" )
_UpperCAmelCase : Any = int(items[0] )
_UpperCAmelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_UpperCAmelCase : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_UpperCAmelCase : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
_UpperCAmelCase : Tuple = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
_UpperCAmelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False )-> Optional[int]:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase : List[Any] = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
else:
_UpperCAmelCase : Optional[Any] = WavaVecaConfig()
if is_seq_class:
_UpperCAmelCase : List[str] = read_txt_into_dict(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = idalabel
_UpperCAmelCase : List[str] = WavaVecaForSequenceClassification(lowerCAmelCase_ )
_UpperCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
feature_extractor.save_pretrained(lowerCAmelCase_ )
elif is_finetuned:
if dict_path:
_UpperCAmelCase : str = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase : str = target_dict.pad_index
_UpperCAmelCase : List[str] = target_dict.bos_index
_UpperCAmelCase : int = target_dict.eos_index
_UpperCAmelCase : Optional[Any] = len(target_dict.symbols )
_UpperCAmelCase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_UpperCAmelCase : int = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCAmelCase : Any = 0
_UpperCAmelCase : int = 1
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = WavaVecaCTCTokenizer(
lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , )
_UpperCAmelCase : int = True if config.feat_extract_norm == """layer""" else False
_UpperCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
_UpperCAmelCase : str = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : str = WavaVecaForCTC(lowerCAmelCase_ )
else:
_UpperCAmelCase : int = WavaVecaForPreTraining(lowerCAmelCase_ )
if is_finetuned or is_seq_class:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_UpperCAmelCase : Optional[Any] = argparse.Namespace(task="""audio_pretraining""" )
_UpperCAmelCase : List[str] = fairseq.tasks.setup_task(lowerCAmelCase_ )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
_UpperCAmelCase : str = model[0].eval()
recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
A_ : Optional[int] = parser.parse_args()
A_ : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 349 |
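# Hedged invocation sketch for the conversion script above, using only the flags the
# script itself defines; the file name and paths are placeholders, not real checkpoints:
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/fairseq/dict.ltr.txt \
#       --pytorch_dump_folder_path ./converted-model
#
# Add --not_finetuned for a pretraining-only checkpoint (no CTC head), or
# --is_seq_class for a sequence-classification checkpoint, in which case
# --dict_path points at the label file consumed by read_txt_into_dict.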
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """yolos"""
def __init__( self ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1E-1_2 ,a_=[512, 864] ,a_=16 ,a_=3 ,a_=True ,a_=100 ,a_=True ,a_=False ,a_=1 ,a_=5 ,a_=2 ,a_=5 ,a_=2 ,a_=0.1 ,**a_ ,) -> List[str]:
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : List[Any] = num_detection_tokens
_UpperCAmelCase : Tuple = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Dict = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : int = bbox_loss_coefficient
_UpperCAmelCase : Optional[Any] = giou_loss_coefficient
_UpperCAmelCase : Union[str, Any] = eos_coefficient
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
@property
def _snake_case ( self ) -> int:
return 12
| 349 | 1 |
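# The OnnxConfig subclass above exposes a single pixel_values input with all four
# axes dynamic, pins the minimum torch version for export at 1.11, and validates
# with an absolute tolerance of 1e-4 (the final property, returning 12, is
# presumably the default ONNX opset). Hedged sketch of consuming that axis mapping;
# `model` stands in for an instantiated YOLOS model and is not defined here:
import torch
dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
dummy_input = torch.randn(1, 3, 512, 864)  # num_channels=3, image_size=[512, 864] defaults above
# torch.onnx.export(model, dummy_input, "yolos.onnx",
#                   input_names=["pixel_values"], dynamic_axes=dynamic_axes, opset_version=12)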
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
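# Worked example for the column-trimming helper above (called trim_batch later in
# this file): columns where every row is padding are dropped. With pad_token_id = 0:
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
#   keep_column_mask = [True, True, False, False]  ->  trimmed shape (2, 2)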
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def normalize_answer(s) -> str:
    """simple docstring"""
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth) -> float:
    """simple docstring"""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth) -> bool:
    """simple docstring"""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns) -> Dict:
    """simple docstring"""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
| 310 | 0 |
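# The F1 computation above is the standard SQuAD-style token-overlap metric:
# normalize both strings, count overlapping tokens, then take the harmonic mean of
# precision and recall. Hand-checked example:
pred, gold = "the cat sat", "a cat sat down"
# normalized tokens: ["cat", "sat"] vs ["cat", "sat", "down"] (articles are stripped)
# overlap = 2  ->  precision = 2/2, recall = 2/3, F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8
assert abs(f1_score(pred, gold) - 0.8) < 1e-9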
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = CTRLTokenizer
__magic_name__ = False
__magic_name__ = False
def a ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Optional[Any] = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_lowerCAmelCase : Tuple = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase : List[str] = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_lowerCAmelCase : Optional[int] = {'unk_token': '<unk>'}
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = 'adapt react readapt apt'
_lowerCAmelCase : Tuple = 'adapt react readapt apt'
return input_text, output_text
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : List[str] = 'adapt react readapt apt'
_lowerCAmelCase : Tuple = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
_lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
| 25 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->int:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple:
'''simple docstring'''
raise NotImplementedError()
| 10 |
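# The abstract base above is the usual two-hook CLI-command pattern: a static
# register_subcommand that wires an argparse sub-parser, and a run() that executes.
# Illustrative concrete subclass (EchoCommand and its flag are made up, not taken
# from the transformers CLI):
import argparse
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers: argparse._SubParsersAction) -> None:
        parser = subparsers.add_parser("echo", help="print the given text")
        parser.add_argument("--text", required=True)
        parser.set_defaults(factory=lambda args: EchoCommand(args.text))
    def __init__(self, text: str) -> None:
        self._text = text
    def run(self) -> None:
        print(self._text)
# usage sketch:
#   root = argparse.ArgumentParser("cli")
#   EchoCommand.register_subcommand(root.add_subparsers())
#   args = root.parse_args(["echo", "--text", "hi"])
#   args.factory(args).run()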
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = RetriBertTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars
):
lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type"))
lowerCamelCase__: int =do_lower_case
lowerCamelCase__: int =strip_accents
lowerCamelCase__: List[str] =tokenize_chinese_chars
lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_)
lowerCamelCase__: Any =do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 10 | 1 |
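# The tokenizer methods above implement the BERT-style special-token layout:
# single sequence -> [CLS] A [SEP], pair -> [CLS] A [SEP] B [SEP], with token type
# ids 0 over the first segment (including [CLS] and the first [SEP]) and 1 over the
# second. Hand-worked check with toy ids (values illustrative, not a real vocab):
#   token_ids_0 = [5, 6], token_ids_1 = [7]
#   build_inputs_with_special_tokens      -> [cls, 5, 6, sep, 7, sep]
#   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1]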
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_:
'''simple docstring'''
__lowercase : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
__lowercase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__lowercase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__lowercase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class lowerCAmelCase_:
'''simple docstring'''
__lowercase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__lowercase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__lowercase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__lowercase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" , UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ : str = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase )
datasets.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase__ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["""label"""].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["""label"""].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["""label"""].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
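    # With dynamic padding, for example, a batch whose longest sequence has 87 tokens is padded
    # to length 87 at collation time instead of to data_args.max_seq_length.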
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["""premise"""] , examples["""hypothesis"""] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
if training_args.do_train:
if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
    metric = evaluate.load("""xnli""" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
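    # For example, logits [[0.1, 0.8, 0.1]] argmax to class 1, which in XNLI's label order
    # ("entailment", "neutral", "contradiction") is "neutral".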
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix="""predict""" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["""predict_samples"""] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("""predict""" , metrics )
        trainer.save_metrics("""predict""" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , """predictions.txt""" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , """w""" ) as writer:
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 184 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
'''simple docstring'''
    cache_dir : Optional[Union[str, Path]] = None
    force_download : bool = False
    resume_download : bool = False
    local_files_only : bool = False
    proxies : Optional[Dict] = None
    user_agent : Optional[str] = None
    extract_compressed_file : bool = False
    force_extract : bool = False
    delete_extracted : bool = False
    use_etag : bool = True
    num_proc : Optional[int] = None
    max_retries : int = 1
    use_auth_token : Optional[Union[str, bool]] = None
    ignore_url_params : bool = False
    storage_options : Optional[Dict] = None
    download_desc : Optional[str] = None
    def copy( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 184 | 1 |
"""simple docstring"""
def nand_gate( input_a : int , input_b : int ):
    '''simple docstring'''
    return int((input_a, input_b).count(0 ) != 0 )
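# NAND is the negation of AND: the tuple trick above yields 1 whenever at least one
# input is 0, and 0 only when both inputs are 1.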
def test_nand_gate( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 77 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1_0_2_4,
    'microsoft/speecht5_tts': 1_0_2_4,
    'microsoft/speecht5_vc': 1_0_2_4,
}
class SpeechTaTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
snake_case_ =VOCAB_FILES_NAMES
snake_case_ =PRETRAINED_VOCAB_FILES_MAP
snake_case_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ =["""input_ids""", """attention_mask"""]
    def __init__(self ,vocab_file ,bos_token="<s>" ,eos_token="</s>" ,unk_token="<unk>" ,pad_token="<pad>" ,sp_model_kwargs = None ,**kwargs ,) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self ) -> int:
        """simple docstring"""
        return self.sp_model.get_piece_size()
    def get_vocab(self ) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self ,d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize(self ,text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id(self ,token ) -> int:
        """simple docstring"""
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token(self ,index ) -> str:
        """simple docstring"""
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string(self ,tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens(self ,token_ids_a ,token_ids_b=None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def get_special_tokens_mask(self ,token_ids_a ,token_ids_b = None ,already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a ,token_ids_1=token_ids_b ,already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + suffix_ones
        return ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def save_vocabulary(self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 129 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
"""simple docstring"""
    def __init__( self , short_edge_length , max_size=sys.maxsize ) -> None:
        '''simple docstring'''
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs ) -> list:
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class Preprocess :
"""simple docstring"""
    def __init__( self , cfg ) -> None:
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x : (x - self.pixel_mean) / self.pixel_std
    def pad( self , images ) -> tuple:
        '''simple docstring'''
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
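    # For example, two images of shapes (3, 480, 640) and (3, 600, 500) are padded to
    # (3, 600, 640) each and stacked into a single (2, 3, 600, 640) batch tensor.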
    def __call__( self , images , single_image=False ) -> tuple:
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
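# Boxes are (x0, y0, x1, y1); the scale tensor is ordered (y, x), so column 1 holds the
# x factor (applied to x0/x1 above) and column 0 the y factor (applied to y0/y1).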
def _clip_box( tensor , box_size: Tuple[int, int] ):
    """simple docstring"""
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
| 351 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> List[str]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> str:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> str:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Tuple:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> str:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Tuple:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> List[str]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Tuple:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Tuple:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Any:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> Any:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=DummyObject ):
    _SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
    def __init__( self , *args , **kwargs ) -> List[str]:
        '''simple docstring'''
        requires_backends(self , ["""sentencepiece"""] )
| 16 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    return EarlyStopping(
        monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class SeqaSeqLoggingCallback( pl.Callback ):
'''simple docstring'''
    def on_batch_end( self , trainer , pl_module ):
        lrs = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'''{key}: {val:.6f}\n'''
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
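    # e.g. a model with 125,000,000 weights is logged as {"n_params": 125000000, "mp": 125.0}.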
@rank_zero_only
    def on_test_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 96 |
"""simple docstring"""
import math
import random
def sigmoid_function(value , deriv = False ):
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
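# Note: with deriv=True the argument is assumed to already be a sigmoid activation,
# e.g. sigmoid_function(0.0) == 0.5 and sigmoid_function(0.5, deriv=True) == 0.25.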
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected , number_propagations ):
    '''simple docstring'''
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 54 | 0 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk)
        for node in graph)
def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
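# Example (hypothetical graphs): the edge 2 -> 0 closes a cycle in the first graph.
# check_cycle({0: [1], 1: [2], 2: [0]}) -> True
# check_cycle({0: [1], 1: [2], 2: []}) -> False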
if __name__ == "__main__":
from doctest import testmod
testmod()
| 356 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMvaConfig( PretrainedConfig ):
    model_type = "layoutlmv3"
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_0_2_4 , coordinate_size=1_2_8 , shape_size=1_2_8 , has_relative_attention_bias=True , rel_pos_bins=3_2 , max_rel_pos=1_2_8 , rel_2d_pos_bins=6_4 , max_rel_2d_pos=2_5_6 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=2_2_4 , num_channels=3 , patch_size=1_6 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.12" )
    @property
    def inputs( self ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 1_2
    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 4_0 , image_height: int = 4_0 , ):
        '''simple docstring'''
        setattr(processor.image_processor , '''apply_ocr''' , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
return inputs
| 172 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
random.seed(SCREAMING_SNAKE_CASE )
np.random.seed(SCREAMING_SNAKE_CASE )
torch.manual_seed(SCREAMING_SNAKE_CASE )
torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE )
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    """simple docstring"""
    def __init__( self , parameters , decay = 0.9999 , min_decay = 0.0 , update_after_step = 0 , use_ema_warmup = False , inv_gamma = 1.0 , power = 2 / 3 , model_cls = None , model_config = None , **kwargs , ):
        """simple docstring"""
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value" , None ) is not None:
            deprecation_message = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate("max_value" , "1.0.0" , deprecation_message , standard_warn=False )
            decay = kwargs['''max_value''']
        if kwargs.get("min_value" , None ) is not None:
            deprecation_message = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate("min_value" , "1.0.0" , deprecation_message , standard_warn=False )
            min_decay = kwargs['''min_value''']
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device" , None ) is not None:
            deprecation_message = '''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate("device" , "1.0.0" , deprecation_message , standard_warn=False )
            self.to(device=kwargs["device"] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained( cls , path , model_cls ):
        """simple docstring"""
        _ , ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self , path ):
        """simple docstring"""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop("shadow_params" , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self , optimization_step ) -> float:
        """simple docstring"""
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
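    # With the warmup schedule above (defaults inv_gamma=1.0, power=2/3), the decay ramps
    # smoothly from 0 toward `decay`; e.g. at step 1 it is 1 - 2 ** (-2 / 3) ≈ 0.37.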
    @torch.no_grad()
    def step( self , parameters ):
        """simple docstring"""
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self , parameters ) -> None:
        """simple docstring"""
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self , device=None , dtype=None ) -> None:
        """simple docstring"""
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self ):
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store( self , parameters ) -> None:
        """simple docstring"""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self , parameters ) -> None:
        """simple docstring"""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self , state_dict ) -> None:
        """simple docstring"""
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("decay" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        self.min_decay = state_dict.get("min_decay" , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError("Invalid min_decay" )
        self.optimization_step = state_dict.get("optimization_step" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError("Invalid optimization_step" )
        self.update_after_step = state_dict.get("update_after_step" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError("Invalid update_after_step" )
        self.use_ema_warmup = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError("Invalid use_ema_warmup" )
        self.inv_gamma = state_dict.get("inv_gamma" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("Invalid inv_gamma" )
        self.power = state_dict.get("power" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("Invalid power" )
        shadow_params = state_dict.get("shadow_params" , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
raise ValueError("shadow_params must all be Tensors" )
| 108 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )->List[str]:
'''simple docstring'''
A_ : str = parent
A_ : int = batch_size
A_ : List[str] = image_size
A_ : Dict = num_channels
A_ : Tuple = embeddings_size
A_ : Union[str, Any] = hidden_sizes
A_ : Dict = depths
A_ : str = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_act
A_ : Optional[Any] = num_labels
A_ : Tuple = scope
A_ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : str = None
if self.use_labels:
A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
A_ : Dict = RegNetModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Any = model(_SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : Dict = RegNetForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Tuple = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self )->None:
        '''simple docstring'''
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self )->Dict:
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self )->Tuple:
        '''simple docstring'''
        return
    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds( self )->Dict:
        '''simple docstring'''
        pass
    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes( self )->str:
        '''simple docstring'''
        pass
    def test_forward_signature( self )->List[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self )->Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_initialization( self )->Optional[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def test_hidden_states_output( self )->List[Any]:
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self )->Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self )->str:
        '''simple docstring'''
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self )->List[str]:
        '''simple docstring'''
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self )->Tuple:
        '''simple docstring'''
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 186 | 0 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list[int]:
lowercase__: str = [0] * no_of_processes
lowercase__: Dict = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__UpperCAmelCase ):
lowercase__: Any = burst_time[i]
lowercase__: list[int] = []
lowercase__: Dict = 0
lowercase__: Optional[Any] = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
lowercase__: Dict = []
lowercase__: Any = -1
for i in range(__UpperCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowercase__: Optional[int] = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowercase__: Tuple = i
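            # Run the selected job to completion (non-preemptive) and record
            # how long it waited between arriving and starting to run.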
total_time += burst_time[target_process]
completed += 1
lowercase__: Tuple = 0
lowercase__: List[str] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list[int]:
lowercase__: List[Any] = [0] * no_of_processes
for i in range(__UpperCAmelCase ):
lowercase__: str = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
__A = 4
__A = [2, 5, 3, 7]
__A = [0, 0, 0, 0]
__A = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__A = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 2 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = CTRLTokenizer
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Optional[int] = '''adapt react readapt apt'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = tokens + [tokenizer.unk_token]
lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
| 2 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""facebook/nllb-large-en-ro""": 1_024,
"""facebook/nllb-200-distilled-600M""": 1_024,
}
# fmt: off
lowercase_ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = NllbTokenizer
UpperCamelCase = []
UpperCamelCase = []
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=None , A=None , A=None , A=False , **A , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=A , tokenizer_file=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , src_lang=A , tgt_lang=A , additional_special_tokens=A , legacy_behaviour=A , **A , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case_( self ) -> str:
return self._src_lang
@src_lang.setter
def snake_case_( self , A ) -> None:
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_( self , A , A = None ) -> List[int]:
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_( self , A , A , A , A , **A ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(A , add_special_tokens=A , return_tensors=A , **A )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(A )
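        # The target language id is forwarded as the forced BOS token so that
        # generation starts in the requested language.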
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def snake_case_( self , A , A = "eng_Latn" , A = None , A = "fra_Latn" , **A , ) -> BatchEncoding:
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(A , A , **A )
def snake_case_( self ) -> int:
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_( self ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_( self , A ) -> None:
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(A )
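        # Legacy fairseq behaviour suffixes [</s>, lang_code]; the newer
        # convention prefixes [lang_code] and suffixes [</s>].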
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
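        # Rebuild the fast tokenizer's post-processor so every encoded
        # sequence is wrapped with the updated language-specific tokens.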
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def snake_case_( self , A ) -> None:
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def snake_case_( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 58 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_vision_model'''
def __init__( self , A=768 , A=12 , A=3 , A=16 , A=288 , A=1 , A=1e-05 , A=False , A=True , A=False , **A , ) -> Dict:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = stop_gradient
_SCREAMING_SNAKE_CASE = share_layernorm
_SCREAMING_SNAKE_CASE = remove_last_layer
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
            _SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]  # pull the nested vision sub-config, not the text one
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_text_model'''
def __init__( self , A=5_0265 , A=768 , A=12 , A=12 , A=1 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=514 , A=1 , A=1e-05 , A=1 , A=0 , A=2 , A="absolute" , A=True , **A , ) -> Union[str, Any]:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = eos_token_id
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower'''
def __init__( self , A=True , A="gelu" , A=768 , A=1 , A=1e-05 , A=False , A="add" , A=12 , A=6 , A=False , A=False , A=None , A=None , **A , ) -> Tuple:
# TODO: remove this once the Hub files are updated.
_SCREAMING_SNAKE_CASE = kwargs.pop("""text_config_dict""" , A )
_SCREAMING_SNAKE_CASE = kwargs.pop("""vision_config_dict""" , A )
super().__init__(**A )
_SCREAMING_SNAKE_CASE = share_cross_modal_transformer_layers
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = share_link_tower_layers
_SCREAMING_SNAKE_CASE = link_tower_type
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = tie_word_embeddings
_SCREAMING_SNAKE_CASE = init_layernorm_from_vision_encoder
if text_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_SCREAMING_SNAKE_CASE = BridgeTowerTextConfig(**A )
_SCREAMING_SNAKE_CASE = BridgeTowerVisionConfig(**A )
@classmethod
def snake_case_( cls , A , A , **A ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 58 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowercase : Optional[Any] = """"""
lowercase : int = """"""
lowercase : List[Any] = """"""
lowercase : Optional[int] = 1 # (0 is vertical, 1 is horizontal)
def A_ ( ) -> None:
a__ , a__ : str = get_dataset(A__ , A__ )
print('Processing...' )
a__ , a__ , a__ : Tuple = update_image_and_anno(A__ , A__ , A__ )
for index, image in enumerate(A__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a__ : int = random_chars(32 )
a__ : Optional[Any] = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
a__ : Optional[int] = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cva.imwrite(F'{file_root}.jpg' , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )  # no leading '/': keep output inside OUTPUT_DIR
print(F'Success {index+1}/{len(A__ )} with {file_name}' )
a__ : List[str] = []
for anno in new_annos[index]:
a__ : Union[str, Any] = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(A__ )
        with open(F'{file_root}.txt' , 'w' ) as outfile:  # same fix: write the labels next to the flipped image
outfile.write('\n'.join(line for line in annos_list ) )
def A_ ( A__ , A__ ) -> tuple[list, list]:
a__ : int = []
a__ : int = []
for label_file in glob.glob(os.path.join(A__ , '*.txt' ) ):
a__ : Optional[Any] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(A__ ) as in_file:
a__ : Tuple = in_file.readlines()
a__ : Dict = os.path.join(A__ , F'{label_name}.jpg' )
a__ : int = []
for obj_list in obj_lists:
a__ : Union[str, Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def A_ ( A__ , A__ , A__ = 1 ) -> tuple[list, list, list]:
a__ : Optional[int] = []
a__ : Any = []
a__ : Dict = []
for idx in range(len(A__ ) ):
a__ : Optional[int] = []
a__ : Optional[Any] = img_list[idx]
path_list.append(A__ )
a__ : Union[str, Any] = anno_list[idx]
a__ : List[str] = cva.imread(A__ )
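        # Annotations are YOLO-format (class, x_center, y_center, w, h) with
        # coordinates normalized to [0, 1], so a flip only mirrors one center.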
if flip_type == 1:
a__ : List[str] = cva.flip(A__ , A__ )
for bbox in img_annos:
a__ : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
a__ : Optional[Any] = cva.flip(A__ , A__ )
for bbox in img_annos:
a__ : Optional[int] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(A__ )
new_imgs_list.append(A__ )
return new_imgs_list, new_annos_lists, path_list
def A_ ( A__ = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
a__ : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 225 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : str = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 225 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__a = (DDPMParallelScheduler,)
def lowerCamelCase__ ( self : int , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__UpperCAmelCase )
return config
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCAmelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : Union[str, Any] = scheduler_class(**__UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.scheduler_classes[0]
__UpperCAmelCase : Tuple = self.get_scheduler_config()
__UpperCAmelCase : List[str] = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Tuple = len(__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter
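        # Build three slightly shifted copies of the deterministic sample so
        # the batched no-noise step runs over a stacked (3 * batch) input.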
__UpperCAmelCase : Tuple = self.dummy_sample_deter + 0.1
__UpperCAmelCase : Optional[Any] = self.dummy_sample_deter - 0.1
__UpperCAmelCase : List[str] = samplea.shape[0]
__UpperCAmelCase : Any = torch.stack([samplea, samplea, samplea] , dim=0 )
__UpperCAmelCase : Optional[int] = torch.arange(__UpperCAmelCase )[0:3, None].repeat(1 , __UpperCAmelCase )
__UpperCAmelCase : str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__UpperCAmelCase : Tuple = scheduler.batch_step_no_noise(__UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__UpperCAmelCase : Optional[int] = torch.sum(torch.abs(__UpperCAmelCase ) )
__UpperCAmelCase : str = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCAmelCase : Any = self.get_scheduler_config()
__UpperCAmelCase : List[Any] = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Any = len(__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = self.dummy_model()
__UpperCAmelCase : List[str] = self.dummy_sample_deter
__UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(__UpperCAmelCase ) ):
# 1. predict noise residual
__UpperCAmelCase : List[str] = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase : Dict = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
__UpperCAmelCase : Tuple = pred_prev_sample
__UpperCAmelCase : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
__UpperCAmelCase : str = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
__UpperCAmelCase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
__UpperCAmelCase : List[str] = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Tuple = len(__UpperCAmelCase )
__UpperCAmelCase : Dict = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter
__UpperCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(__UpperCAmelCase ) ):
# 1. predict noise residual
__UpperCAmelCase : Optional[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase : Optional[int] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
__UpperCAmelCase : str = pred_prev_sample
__UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
__UpperCAmelCase : Tuple = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Tuple = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
__UpperCAmelCase : Any = scheduler.timesteps
for i, timestep in enumerate(__UpperCAmelCase ):
if i == len(__UpperCAmelCase ) - 1:
__UpperCAmelCase : int = -1
else:
__UpperCAmelCase : str = timesteps[i + 1]
__UpperCAmelCase : int = scheduler.previous_timestep(__UpperCAmelCase )
__UpperCAmelCase : List[str] = prev_t.item()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.scheduler_classes[0]
__UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
__UpperCAmelCase : int = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Any = [100, 87, 50, 51, 0]
with self.assertRaises(__UpperCAmelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.scheduler_classes[0]
__UpperCAmelCase : Tuple = self.get_scheduler_config()
__UpperCAmelCase : Tuple = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Tuple = [100, 87, 50, 1, 0]
__UpperCAmelCase : str = len(__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__UpperCAmelCase , timesteps=__UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : Tuple = self.get_scheduler_config()
__UpperCAmelCase : int = scheduler_class(**__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
| 115 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase_ = 'src/diffusers'
UpperCamelCase_ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase_ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase_ = spec.loader.load_module()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , UpperCAmelCase ) is not None
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = object_name.split("." )
a_ = 0
# First let's find the module where our object lives.
a_ = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , F'''{module}.py''' ) ):
i += 1
if i < len(UpperCAmelCase ):
a_ = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(UpperCAmelCase , F'''{module}.py''' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
# Now let's find the class / func in the code!
a_ = ""
a_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
a_ = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
return "".join(UpperCAmelCase )
UpperCamelCase_ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase_ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase_ = re.compile(R'<FILL\s+[^>]*>')
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = code.split("\n" )
a_ = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
a_ = F'''class Bla:\n{code}'''
a_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase )
a_ = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
a_ , a_ = style_docstrings_in_code(UpperCAmelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->str:
"""simple docstring"""
with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
a_ = []
a_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
a_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
a_ , a_ , a_ = search.groups()
a_ = find_code_in_diffusers(UpperCAmelCase )
a_ = get_indent(UpperCAmelCase )
a_ = line_index + 1 if indent == theoretical_indent else line_index + 2
a_ = theoretical_indent
a_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
a_ = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
a_ = lines[line_index]
a_ = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(F'''^{indent}# End copy''' , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
a_ = "".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
a_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
a_ = "\n".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
a_ = replace_pattern.replace("with" , "" ).split("," )
a_ = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
a_ , a_ , a_ = pattern.groups()
a_ = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
a_ = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
a_ = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
a_ = blackify(lines[start_index - 1] + theoretical_code )
a_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
a_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
a_ = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def UpperCamelCase ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
a_ = glob.glob(os.path.join(UpperCAmelCase , "**/*.py" ) , recursive=UpperCAmelCase )
a_ = []
for filename in all_files:
a_ = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
a_ = "\n".join(UpperCAmelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite) | 243 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=1 , __UpperCAmelCase : Any=False , **__UpperCAmelCase : str ) ->Optional[int]:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
a = vocab_size
a = d_embed
a = d_proj
a = cutoffs + [vocab_size]
a = [0] + self.cutoffs
a = div_val
a = self.cutoffs[0]
a = len(self.cutoffs ) - 1
a = self.shortlist_size + self.n_clusters
a = keep_order
a = []
a = []
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
if self.n_clusters > 0:
a = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__UpperCAmelCase , name='''cluster_weight''' )
a = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__UpperCAmelCase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
a = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_projs_._{i}""" , )
self.out_projs.append(__UpperCAmelCase )
else:
self.out_projs.append(__UpperCAmelCase )
a = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._weight""" , )
a = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.d_embed // (self.div_val**i)
a = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_projs_._{i}""" )
self.out_projs.append(__UpperCAmelCase )
a = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._weight""" , )
a = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(__UpperCAmelCase )
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple=None ) ->Dict:
"""simple docstring"""
a = x
if proj is not None:
a = tf.einsum('''ibd,ed->ibe''' , __UpperCAmelCase , __UpperCAmelCase )
return tf.einsum('''ibd,nd->ibn''' , __UpperCAmelCase , __UpperCAmelCase ) + b
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
a = shape_list(__UpperCAmelCase )
a = tf.range(lp_size[0] , dtype=target.dtype )
a = tf.stack([r, target] , 1 )
return tf.gather_nd(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Union[str, Any]=False ) ->str:
"""simple docstring"""
a = 0
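        # With no clusters this is a plain softmax head; otherwise frequent
        # tokens stay in the head and rarer ones route through tail clusters.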
if self.n_clusters == 0:
a = self._logit(__UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
a = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__UpperCAmelCase , logits=__UpperCAmelCase )
a = tf.nn.log_softmax(__UpperCAmelCase , axis=-1 )
else:
a = shape_list(__UpperCAmelCase )
a = []
a = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
a = (target >= l_idx) & (target < r_idx)
a = tf.where(__UpperCAmelCase )
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase ) - l_idx
if self.div_val == 1:
a = self.out_layers[0][0][l_idx:r_idx]
a = self.out_layers[0][1][l_idx:r_idx]
else:
a = self.out_layers[i][0]
a = self.out_layers[i][1]
if i == 0:
a = tf.concat([cur_W, self.cluster_weight] , 0 )
a = tf.concat([cur_b, self.cluster_bias] , 0 )
a = self._logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.out_projs[0] )
a = tf.nn.log_softmax(__UpperCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
a = self._gather_logprob(__UpperCAmelCase , __UpperCAmelCase )
else:
a = self._logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.out_projs[i] )
a = tf.nn.log_softmax(__UpperCAmelCase )
a = self.cutoffs[0] + i - 1 # No probability for the head cluster
a = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__UpperCAmelCase )
if target is not None:
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
a = self._gather_logprob(__UpperCAmelCase , __UpperCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__UpperCAmelCase , -cur_logprob , shape_list(__UpperCAmelCase ) )
a = tf.concat(__UpperCAmelCase , axis=-1 )
if target is not None:
if return_mean:
a = tf.reduce_mean(__UpperCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__UpperCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__UpperCAmelCase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 26 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = torch.device("cpu")
def _a ( ) -> Union[str, Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
def _a ( a :Dict ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _a ( a :int , a :Any , a :Union[str, Any] ) -> int:
a = dct.pop(a )
a = val
def _a ( a :Any ) -> Dict:
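    # Map the original SwiftFormer checkpoint keys onto the HF naming scheme.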
a = []
for k in state_dict.keys():
a = k
if ".pwconv" in k:
a = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
a = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
a = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
a = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
a = k_new.split('''.''' )
if ls[2].isdigit():
a = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
a = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _a ( a :List[Any] , a :Tuple , a :List[str] ) -> Union[str, Any]:
a = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a = 1_000
a = '''huggingface/label-files'''
a = '''imagenet-1k-id2label.json'''
a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a = [3, 3, 6, 4]
a = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a = [3, 3, 9, 6]
a = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a = [4, 3, 10, 5]
a = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a = [4, 4, 12, 6]
a = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' , check_hash=a )
else:
a = torch.load(a , map_location='''cpu''' )
a = checkpoint
a = create_rename_keys(a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a , a , a )
# load HuggingFace model
a = SwiftFormerForImageClassification(a ).eval()
hf_model.load_state_dict(a )
# prepare test inputs
a = prepare_img()
a = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
a = processor(images=a , return_tensors='''pt''' )
# compare outputs from both models
a = get_expected_output(a )
a = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , a , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCAmelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 26 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1_0_0 ) -> int:
    '''Return the difference between the square of the sum and the sum of the squares of the first n natural numbers (Project Euler #6).'''
A__ = (n * (n + 1) // 2) ** 2
A__ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 68 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self: List[str] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : Optional[Any] = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def A__ ( self: List[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def A__ ( self: str ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def A__ ( self: Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(lowerCamelCase_ )
@property
def A__ ( self: Tuple ) -> Tuple:
def extract(*lowerCamelCase_: Optional[Any] ,**lowerCamelCase_: str ):
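            # Stand-in feature extractor: returns an object exposing
            # pixel_values and a pass-through .to(device) for pipeline plumbing.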
class _snake_case :
'''simple docstring'''
def __init__( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = torch.ones([0] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> int:
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def A__ ( self: Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : int = self.dummy_cond_unet
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,)
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : List[str] = self.dummy_text_encoder
UpperCAmelCase_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_ : str = StableDiffusionPipeline(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,vae=lowerCamelCase_ ,text_encoder=lowerCamelCase_ ,tokenizer=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=self.dummy_extractor ,)
UpperCAmelCase_ : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : int = sd_pipe([prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : Dict = sd_pipe(
[prompt] ,generator=lowerCamelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self: Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.dummy_cond_unet
UpperCAmelCase_ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCAmelCase_ : str = self.dummy_vae
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 345 | 0 |
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generate and return the next pseudorandom number in the sequence."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 49 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve the linear system Ax = b with the Jacobi iteration method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise a ValueError unless the coefficient part of the augmented matrix is strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
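    # Hedged usage sketch (not part of the original module): a small strictly
    # diagonally dominant 2x2 system solved with the function defined above.
    # The matrix values and iteration count are illustrative only.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25))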
| 49 | 1 |
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
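# Hedged usage sketch (not part of the original module): constructing a config
# with the default Swin backbone and round-tripping it through `to_dict()`.
# The import path assumes the usual `transformers` top-level re-export.
#
#     from transformers import Mask2FormerConfig
#
#     config = Mask2FormerConfig(num_queries=50)
#     config_dict = config.to_dict()
#     assert config_dict["model_type"] == "mask2former"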
| 137 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
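# Hedged usage note (not part of the original module): with `_LazyModule`,
# importing the package itself stays cheap; the torch-backed `modeling_jukebox`
# submodule is only loaded when one of its names is first accessed, e.g.:
#
#     from transformers import JukeboxConfig   # no torch-heavy import triggered
#     from transformers import JukeboxModel    # loads modeling_jukebox on access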
| 228 | 0 |
""" Testing suite for the PyTorch CvT model. """


import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 4 |
"""Utilities for loading and featurizing the HANS dataset."""

import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class. Note that the HANS evaluation groups `contradiction` and `neutral` into `non-entailment`."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Loads a list of ``InputExample``s into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 4 | 1 |
"""Shortest job remaining first scheduling: compute per-process waiting and turnaround times."""

from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.

    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 2 |
"""Utilities for building dataset file names."""

# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
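# Hedged usage sketch (not part of the original module), illustrating the
# helpers above with made-up dataset names and paths:
#
#     camelcase_to_snakecase("SomeDatasetName")              # -> "some_dataset_name"
#     filename_prefix_for_split("SomeDatasetName", "train")  # -> "some_dataset_name-train"
#     filenames_for_dataset_split(
#         "/data", "SomeDatasetName", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
#     )
#     # -> ["/data/some_dataset_name-train-00000-of-00002.arrow",
#     #     "/data/some_dataset_name-train-00001-of-00002.arrow"]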
| 2 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _lowercase :
"""simple docstring"""
def __init__( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict=13 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[int]=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : str=16 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Optional[Any]=None , ) -> str:
'''simple docstring'''
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
__UpperCamelCase =vocab_size - 1
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase =True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> Any:
'''simple docstring'''
__UpperCamelCase =GPTNeoXModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
__UpperCamelCase =model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
__UpperCamelCase =True
__UpperCamelCase =GPTNeoXModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =GPTNeoXForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =self.num_labels
__UpperCamelCase =GPTNeoXForQuestionAnswering(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> str:
'''simple docstring'''
__UpperCamelCase =self.num_labels
__UpperCamelCase =GPTNeoXForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.num_labels
__UpperCamelCase =GPTNeoXForTokenClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> Dict:
'''simple docstring'''
__UpperCamelCase =True
__UpperCamelCase =GPTNeoXForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__UpperCamelCase =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase =torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
__UpperCamelCase =output_from_no_past['''hidden_states'''][0]
__UpperCamelCase =model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['''hidden_states'''][0]
# select random slice
__UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( __a , __a , __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase__ = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =GPTNeoXModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=64 , num_attention_heads=8 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_decoder()
__UpperCamelCase =None
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : str ) -> Any:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =ids_tensor([1, 10] , config.vocab_size )
__UpperCamelCase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase =GPTNeoXModel(UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
original_model.eval()
__UpperCamelCase =original_model(UpperCamelCase__ ).last_hidden_state
__UpperCamelCase =original_model(UpperCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase ={'''type''': scaling_type, '''factor''': 10.0}
__UpperCamelCase =GPTNeoXModel(UpperCamelCase__ )
scaled_model.to(UpperCamelCase__ )
scaled_model.eval()
__UpperCamelCase =scaled_model(UpperCamelCase__ ).last_hidden_state
__UpperCamelCase =scaled_model(UpperCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
__UpperCamelCase =GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(UpperCamelCase__ )
__UpperCamelCase =tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(UpperCamelCase__ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__UpperCamelCase ='''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
__UpperCamelCase =model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=20 )
__UpperCamelCase =tokenizer.batch_decode(UpperCamelCase__ )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 353 |
"""A circular singly linked list implementation."""

from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise the CircularLinkedList implementation above."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
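    # Hedged usage sketch (not part of the original module): a tiny interactive
    # demo of the list implemented above.
    demo = CircularLinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    print(demo)                 # 1->2->3
    print(demo.delete_front())  # 1
    print(demo)                 # 2->3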
| 85 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class a_ ( lowerCamelCase ):
lowercase = ["""input_features"""]
def __init__( self , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE=160 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
super().__init__(
feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = n_fft
UpperCamelCase = hop_length
UpperCamelCase = chunk_length
UpperCamelCase = chunk_length * sampling_rate
UpperCamelCase = self.n_samples // hop_length
UpperCamelCase = sampling_rate
UpperCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=_SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase = spectrogram(
_SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
UpperCamelCase = log_spec[:, :-1]
UpperCamelCase = np.maximum(_SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def A__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase = np.array(_SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase = []
for vector, length in zip(_SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase = padding_value
normed_input_values.append(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None,
                 return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None,
                 do_normalize=None, **kwargs) -> BatchFeature:
        """Featurize one or several sequences of raw speech."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict, dropping the (large) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
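

# --- Added usage sketch (not part of the original file): a hedged example of
# calling the extractor above on synthetic audio. Because this module relies
# on relative imports, the snippet assumes the class is reachable through the
# installed package; shapes assume the 30 s / 16 kHz defaults above.
#
#   extractor = a_(feature_size=80, sampling_rate=16000)
#   one_second = np.zeros(16000, dtype=np.float32)
#   feats = extractor(one_second, sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape  # expected: (1, 80, 3000)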
| 321 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."})
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."})


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
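
# --- Added usage note (not part of the original script): a hedged example of
# launching this MAE pretraining example; paths and hyperparameters below are
# illustrative placeholders only.
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --num_train_epochs 1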
| 321 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates a pair of `DataLoader`s for the glue/mrpc dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate, tracking the best accuracy across epochs."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.")
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
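
# --- Added usage note (not part of the original script): a hedged example of
# running this under `accelerate` with a DeepSpeed config; the config file
# name is an illustrative placeholder.
#
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 3 \
#       --output_dir ./results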
| 371 | """simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
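
# --- Added usage note (not part of the original file): with the `_LazyModule`
# pattern above, heavy submodules are imported only on first attribute access,
# so importing the package stays cheap. A hedged sketch of the effect:
#
#   import transformers
#   config = transformers.GPTNeoXConfig()  # triggers the lazy import here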
| 85 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24,
                 attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256,
                 activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0,
                 classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
                 bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute block length and number of blocks for local attention, ONNX-exportable."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
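
# --- Added usage sketch (not part of the original file): `custom_unfold`
# mirrors `torch.Tensor.unfold` so local-attention slicing can be exported to
# ONNX. A hedged check of the equivalence, assuming `torch` is available:
#
#   import torch
#   x = torch.arange(10).view(1, 10)
#   torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))  # True
#   custom_unfold(x, 1, 4, 2).shape  # torch.Size([1, 4, 4])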
| 158 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Count the parameters that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1)
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True)
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid") | 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
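
# --- Added note (not part of the original script): for the hard-coded graph
# above, the call produces a post-order list; read right-to-left it is a
# valid topological order:
#
#   topological_sort("a", [], [])  # -> ['c', 'd', 'e', 'b', 'a']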
| 294 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0] | 96 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Query a LAION kNN service for regularization images and download them."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 308 | 0 |
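
# --- Added usage note (not part of the original script): a hedged example of
# invoking the retrieval script above; the prompt and directory are
# illustrative placeholders.
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./class_images --num_class_images 200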
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 335 | 0 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
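
# --- Added note (not part of the original file): the dummy-object pattern
# above lets the package import successfully without the optional `torchsde`
# dependency; any attempt to actually use the class raises a clear backend
# error instead. A hedged sketch of the resulting behavior:
#
#   from diffusers import DPMSolverSDEScheduler
#   DPMSolverSDEScheduler()  # raises if torchsde is not installed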
| 196 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait() | 191 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_snake_case , _snake_case )}
elif self.config_name == "stsb":
return pearson_and_spearman(_snake_case , _snake_case )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_snake_case , _snake_case )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_snake_case , _snake_case )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' ) | 33 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
) | 33 | 1 |
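# Editor's worked trace (not in the original row): for all_construct("ab", ["a", "b", "ab"])
# the table grows from [[[]], [], []] to [[[]], [["a"]], [["ab"], ["b", "a"]]]; after the
# final reversal the function returns [["ab"], ["a", "b"]].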
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
| 59 |
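# Editor's sketch (not in the original row): a typical invocation of the script above;
# argument values are illustrative only.
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs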
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 72 | 0 |
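# Editor's sketch (not in the original row): pairing audio and text in one processor call,
# assuming a pretrained checkpoint such as "facebook/s2t-small-librispeech-asr" is available.
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
# inputs["input_features"], inputs["labels"]  # extractor features and tokenizer ids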
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main() | 351 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}")

        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ])
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key
    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)
    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs | 239 | 0 |
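# Editor's sketch (not in the original row): exercising the config wrapper above with an
# in-memory dict; the key paths follow DeepSpeed's JSON schema.
ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
assert ds_config.is_zero3() and ds_config.is_offload()
assert ds_config.get_value("zero_optimization.stage") == 3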
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 |
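# Editor's note (sketch, not in the original row): with the `_LazyModule` indirection above,
# `from transformers.models.data2vec import Data2VecTextModel` defers the heavy
# `modeling_data2vec_text` import until the attribute is first accessed.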
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i

            return out | 163 | 0 |
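# Editor's sketch (not in the original row): a toy instantiation of the adaptive softmax
# above; calling it without labels returns log-probabilities over the full vocabulary.
adaptive_softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
log_probs = adaptive_softmax(torch.randn(4, 32))
assert log_probs.shape == (4, 1000)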
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    # Find all the valid positions a knight can move to from the current position.
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    # Check if the board (matrix) has been completely filled with non-zero values.
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    # Helper function to solve the knight tour problem by backtracking.
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    # Find the solution for the knight tour problem for a board of size n.
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
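# Editor's sketch (not in the original row): a 5x5 board admits an open knight's tour, so
# this prints rows whose entries 1..25 give the visiting order (plain backtracking, so it
# may take a moment).
for row in open_knight_tour(5):
    print(row)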
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed")(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        # Creates a temporary directory shared by the whole test class.
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        # Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself.
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env)

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout)
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 29 | 1 |
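# Editor's sketch (not in the original row): the helpers above compose like ordinary
# unittest decorators.
class ExampleTest(AccelerateTestCase):
    @require_cuda
    def test_on_gpu(self):
        assert torch.cuda.is_available()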
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
A: int = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
A: Union[str, Any] = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n"
A: List[str] = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/google-research/bleurt", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/google-research/bleurt"], reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"])

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 109 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    # Converts a 3d point to a 2d drawable point via perspective projection.
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    # Rotate a point around a certain axis by a certain angle.
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }") | 152 | 0 |
'''simple docstring'''
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
a_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph) | 371 |
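# Worked example (editor's note): one longest chain in the DAG above is
# 0 -> 2 -> 5 -> 6 -> 7, i.e. 5 vertices, so the call above prints 5.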
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    # Check if a number is a perfect square via floating-point square root.
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # Check if a number is a perfect square using binary search.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 222 | 0 |
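# Editor's sketch (not in the original row): both predicates agree on small inputs.
assert perfect_square(9) and perfect_square_binary_search(16)
assert not perfect_square_binary_search(10)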
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowerCAmelCase = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 110 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"])

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 110 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""

    # Map each ASCII lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 353 |
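# Editor's sketch (not in the original row): only the first character is upper-cased, and
# only when it is an ASCII lowercase letter.
assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello") == "123 hello"
assert capitalize("") == ""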
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _snake_case ( A__ ):
_lowercase : Optional[int] = ['''pixel_values''']
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = 1 / 255 , a = True , a = None , a = None , a = True , **a , ) -> None:
super().__init__(**a)
SCREAMING_SNAKE_CASE = size if size is not None else {'height': 384, 'width': 384}
SCREAMING_SNAKE_CASE = get_size_dict(a , default_to_square=a)
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
SCREAMING_SNAKE_CASE = get_size_dict(a , default_to_square=a)
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''')
SCREAMING_SNAKE_CASE = (size['height'], size['width'])
return resize(a , size=a , resample=a , data_format=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None , **a , ) -> Optional[Any]:
return rescale(a , scale=a , data_format=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a = None , **a , ) -> np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a)
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors)
        return encoded_outputs
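# Hypothetical usage sketch (kept as comments since it needs an image file on
# disk; `_snake_case` is the processor class defined above):
#   processor = _snake_case()
#   batch = processor(images=PIL.Image.open("example.jpg"), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384) with the defaults above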
| 327 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list):
    '''simple docstring'''
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
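# e.g. (a sketch): check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)]) is
# True, while mixing a (2, 3) and a (3, 2) tensor yields False.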
class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
    '''simple docstring'''
    pipeline_class =StableDiffusionLatentUpscalePipeline
    params =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        """height""",
        """width""",
        """cross_attention_kwargs""",
        """negative_prompt_embeds""",
        """prompt_embeds""",
    }
    required_optional_params =PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
    batch_params =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params =frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params =frozenset([])
snake_case_ =True
@property
    def dummy_image(self ) -> Any:
        """simple docstring"""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components(self ) -> Any:
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            act_fn='''gelu''' ,attention_head_dim=8 ,norm_num_groups=None ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=1_60 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
                '''KDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
            ) ,in_channels=8 ,mid_block_type=None ,only_cross_attention=False ,out_channels=5 ,resnet_time_scale_shift='''scale_shift''' ,time_embedding_type='''fourier''' ,timestep_post_act='''gelu''' ,up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') ,)
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
            ] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
        scheduler = EulerDiscreteScheduler(prediction_type='''sample''' )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act='''quick_gelu''' ,projection_dim=5_12 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs(self ,device ,seed=0 ) -> List[str]:
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self ) -> List[Any]:
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape ,(1, 2_56, 2_56, 3) )
        expected_slice = np.array(
            [0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff ,1e-3 )
    def test_attention_slicing_forward_pass(self ) -> List[Any]:
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def test_cpu_offload_forward_pass(self ) -> Dict:
        """simple docstring"""
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def test_dict_tuple_outputs_equivalent(self ) -> List[Any]:
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical(self ) -> Any:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def test_pt_np_pil_outputs_equivalent(self ) -> Optional[Any]:
        """simple docstring"""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def test_save_load_local(self ) -> str:
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components(self ) -> Union[str, Any]:
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_karras_schedulers_shape(self ) -> Optional[int]:
        """simple docstring"""
        skip_schedulers = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers ,scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            image = pipe(**inputs )[0]
            outputs.append(image )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests( unittest.TestCase):
'''simple docstring'''
    def tearDown(self ) -> List[Any]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self ) -> Union[str, Any]:
        """simple docstring"""
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ,torch_dtype=torch.floataa )
        pipe.to('''cuda''' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' ,torch_dtype=torch.floataa )
        upscaler.to('''cuda''' )
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt ,generator=generator ,output_type='''latent''' ).images
        image = upscaler(
            prompt=prompt ,image=low_res_latents ,num_inference_steps=20 ,guidance_scale=0 ,generator=generator ,output_type='''np''' ,).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def test_latent_upscaler_fp16_image(self ) -> Union[str, Any]:
        """simple docstring"""
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''' ,torch_dtype=torch.floataa )
        upscaler.to('''cuda''' )
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
        image = upscaler(
            prompt=prompt ,image=image ,num_inference_steps=20 ,guidance_scale=0 ,generator=generator ,output_type='''np''' ,).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 129 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple ,flax_tensor):
    '''simple docstring'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor ,(0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
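# Shape note (comment added for clarity, inferred from the code above): T5X
# stores an expert block's kernels as (num_experts, d_in, d_out) while PyTorch
# linear weights are (d_out, d_in) — hence the (0, 2, 1) permute for experts
# and the plain transpose for ordinary kernels.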
def get_key_and_tensorstore_dict( layer ,checkpoint_info ,switch_checkpoint_path):
    '''simple docstring'''
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block ,save_path):
    '''simple docstring'''
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''' ,'''.''')] = v
    current_block = new_current_block
    torch.save(current_block ,save_path)
def shard_on_the_fly( switch_checkpoint_path ,dump_path ,max_shard_size ,dtype ,weights_name : str = WEIGHTS_NAME):
    '''simple docstring'''
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path ,exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' ,'''rb''') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info ,sep='''/''')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer ,checkpoint_info ,switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('''/''')) ,raw_weights)
        key = '''/'''.join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path ,weights_name.replace('''.bin''' ,f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            rename_and_save_block(current_block ,save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch ,dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path ,weights_name.replace('''.bin''' ,f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block ,save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''' ,f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path ,weights_name.replace('''.bin''' ,f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename ,os.path.join(dump_path ,shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path ,WEIGHTS_INDEX_NAME) ,'''w''' ,encoding='''utf-8''') as f:
        content = json.dumps(index ,indent=2 ,sort_keys=True) + '''\n'''
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' ,device_map='''auto''')
    tokenizer = TaTokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text ,return_tensors='''pt''').input_ids
    out = model.generate(input_ids ,decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 129 | 1 |
'''simple docstring'''
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def UpperCAmelCase_ ( decimal : float ) -> str:
    '''simple docstring'''
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
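# Quick sanity checks (a sketch; values follow the lookup table above):
assert UpperCAmelCase_(255) == "0xff"
assert UpperCAmelCase_(-42) == "-0x2a"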
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
'''simple docstring'''
def solution( limit : int = 100_0000 ) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also a<4d
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
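# Why the modulo-4 test works (derivation added for clarity): with the
# arithmetic progression x = a + d, y = a, z = a - d,
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a),
# so every n = x**2 - y**2 - z**2 factors as n = a * m with m = 4 * d - a,
# i.e. d = (a + m) / 4 — an integer exactly when (a + m) is divisible by 4,
# matching the `common_difference % 4` check above.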
if __name__ == "__main__":
print(F"{solution() = }")
| 156 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 259 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type ='git_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ) -> Tuple:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    """simple docstring"""
    model_type ='git'
    def __init__( self , vision_config=None , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ) -> int:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ) -> Optional[int]:
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
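# Hypothetical usage sketch (class names reconstructed from the internal
# references above):
#   config = GitConfig()
#   config.vision_config.image_size  # -> 224 with the defaults above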
| 259 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence : list[int | str] ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
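# Note (comment added for clarity): this is textbook backtracking — each call
# marks slot i as used, recurses one position deeper, then undoes both the
# appended element and the flag so sibling branches start from a clean state.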
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = """src/transformers"""
PATH_TO_DOCS = """docs/source/en"""
REPO_PATH = """."""
def _find_text_in_file( filename , start_prompt , end_prompt ) -> Any:
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowercase : Dict = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_re_flax_models = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ) -> list:
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
def _center_text( text , width ) -> str:
    text_length = 2 if text == '✅' or text == '❌' else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith('Tokenizer' ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast' ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = '|' + '|'.join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + '|\n'
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table( overwrite=False ):
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 99 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("""_T""")
class A__ ( Generic[_T] ):
    """simple docstring"""
    def __init__( self , iterable = None) -> None:
        '''simple docstring'''
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []
    def __len__( self) -> int:
        '''simple docstring'''
        return len(self._stack1) + len(self._stack2)
    def __repr__( self) -> str:
        '''simple docstring'''
        return F'Queue({tuple(self._stack2[::-1] + self._stack1)})'
    def put( self , item) -> None:
        '''simple docstring'''
        self._stack1.append(item)
    def get( self) -> _T:
        '''simple docstring'''
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()
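# FIFO sanity check (a sketch; `A__` is the two-stack queue defined above,
# with `put`/`get` reconstructed from the stack operations):
queue_example = A__([1, 2])
queue_example.put(3)
assert [queue_example.get(), queue_example.get(), queue_example.get()] == [1, 2, 3]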
if __name__ == "__main__":
from doctest import testmod
testmod()
| 99 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = """data2vec-vision"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , )-> List[str]:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation( self )-> float:
        '''simple docstring'''
        return 1e-4
| 365 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """data2vec-vision"""
def __init__( self : Optional[int] , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : str=1_2 , UpperCamelCase__ : Optional[Any]=3_0_7_2 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : str=1e-12 , UpperCamelCase__ : Dict=2_2_4 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : str=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]=[3, 5, 7, 1_1] , UpperCamelCase__ : List[str]=[1, 2, 3, 6] , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Any=0.4 , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[int]=2_5_5 , **UpperCamelCase__ : Dict , )-> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase__)
__lowerCAmelCase: List[str] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: Optional[int] = intermediate_size
__lowerCAmelCase: int = hidden_act
__lowerCAmelCase: Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase: Any = attention_probs_dropout_prob
__lowerCAmelCase: Dict = initializer_range
__lowerCAmelCase: Any = layer_norm_eps
__lowerCAmelCase: Union[str, Any] = image_size
__lowerCAmelCase: Tuple = patch_size
__lowerCAmelCase: List[str] = num_channels
__lowerCAmelCase: Optional[Any] = use_mask_token
__lowerCAmelCase: str = use_absolute_position_embeddings
__lowerCAmelCase: Optional[int] = use_relative_position_bias
__lowerCAmelCase: str = use_shared_relative_position_bias
__lowerCAmelCase: Union[str, Any] = layer_scale_init_value
__lowerCAmelCase: Any = drop_path_rate
__lowerCAmelCase: Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase: int = out_indices
__lowerCAmelCase: Any = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase: List[Any] = use_auxiliary_head
__lowerCAmelCase: int = auxiliary_loss_weight
__lowerCAmelCase: Dict = auxiliary_channels
__lowerCAmelCase: Any = auxiliary_num_convs
__lowerCAmelCase: Any = auxiliary_concat_input
__lowerCAmelCase: Any = semantic_loss_ignore_index
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Dict = version.parse("""1.11""" )
@property
def lowercase_ ( self : Optional[Any])-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self : Any)-> float:
'''simple docstring'''
return 1e-4
| 108 | 0 |
def add( first: int , second: int ) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
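# How the loop above works (comment added for clarity): `first & second`
# isolates the carry bits, `first ^ second` adds without carrying, and the
# shifted carry is folded back in until it vanishes. With Python's unbounded
# ints this is only guaranteed to terminate for non-negative inputs.
assert add(14, 32) == 46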
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('''Enter the first number: ''').strip())
    second = int(input('''Enter the second number: ''').strip())
print(F"""{add(first, second) = }""")
| 280 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size(self ) -> int:
        return len(self.sp_model )
    def get_vocab(self ) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize(self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token(self , index ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string(self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__(self ) -> dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 153 | 0 |
"""simple docstring"""
def mf_knapsack(i ,wt ,val ,j ):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 ,wt ,val ,j )
        else:
            val = max(
                mf_knapsack(i - 1 ,wt ,val ,j ) ,mf_knapsack(i - 1 ,wt ,val ,j - wt[i - 1] ) + val[i - 1] ,)
        f[i][j] = val
    return f[i][j]
def knapsack(w ,wt ,val ,n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 ,n + 1 ):
        for w_ in range(1 ,w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] ,dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
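# Recurrence used above (comment added for clarity): dp[i][w_] is the best
# value achievable with the first i items under capacity w_ — either skip
# item i, or take it and add val[i - 1] on top of dp[i - 1][w_ - wt[i - 1]].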
def knapsack_with_example_solution(w ,wt ,val ):
    '''simple docstring'''
    if not (isinstance(wt ,(list, tuple) ) and isinstance(val ,(list, tuple) )):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"""But got {num_items} weights and {len(val )} values"""
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] ,int ):
            msg = (
                "All weights must be integers but got weight of "
                f"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w ,wt ,val ,num_items )
    example_optional_set = set()
    _construct_solution(dp_table ,wt ,num_items ,w ,example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp ,wt ,i ,j ,optimal_set ):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp ,wt ,i - 1 ,j ,optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp ,wt ,i - 1 ,j - wt[i - 1] ,optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 195 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE = R".*/layers_(\d+)"
__SCREAMING_SNAKE_CASE = key
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).groups()
if groups[0] == "encoder":
__SCREAMING_SNAKE_CASE = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase_ )
elif groups[0] == "decoder":
__SCREAMING_SNAKE_CASE = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""{key} -> {new_key}""" )
__SCREAMING_SNAKE_CASE = s_dict.pop(lowerCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__SCREAMING_SNAKE_CASE = s_dict[key].shape[0]
__SCREAMING_SNAKE_CASE = s_dict[key]
for idx in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = expert_weihts[idx]
print(f"""{key} -> {key.replace('expert/' , 'nested fstring' )}""" )
s_dict.pop(lowerCAmelCase_ )
return s_dict
a__ : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
import regex as re
with open(lowerCAmelCase_ , "r" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__SCREAMING_SNAKE_CASE = float(lowerCAmelCase_ ) if "." in value else int(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase_ )[0]
__SCREAMING_SNAKE_CASE = str(activation[1] )
__SCREAMING_SNAKE_CASE = num_experts
__SCREAMING_SNAKE_CASE = SwitchTransformersConfig(**lowerCAmelCase_ )
return config
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="./" , lowerCAmelCase_=8 ):
'''simple docstring'''
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
__SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
if gin_file is not None:
__SCREAMING_SNAKE_CASE = convert_gin_to_config(lowerCAmelCase_ , lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = flax_params["target"]
__SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ , sep="/" )
__SCREAMING_SNAKE_CASE = rename_keys(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = unflatten_dict(lowerCAmelCase_ , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
            '''Path to the T5X checkpoint to convert. The model architecture is read from `gin_file` if'''
            ''' provided, otherwise from the pretrained config named by `config_name`.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
a__ : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
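
# Minimal sketch of the renaming rule applied in `rename_keys` above: T5X stores
# transformer layers as `.../layers_<n>/...`, while the HF checkpoint layout
# expects `.../block/<n>/layer/...`.
import re

old_key = "encoder/layers_3/attention/query/kernel"
new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", old_key)
assert new_key == "encoder/block/3/layer/attention/query/kernel"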
| 195 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeps the
# full vocab and merges file, and thus also results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model about 50 times smaller than this one, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__a = '''facebook/wmt19-en-de'''
__a = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__a = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__a = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
__a = tokenizer(['Making tiny model'], return_tensors='pt')
__a = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
__a = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
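
# Consuming the uploaded checkpoint later is then just (hedged sketch; assumes
# the hub id "stas/tiny-wmt19-en-de" mentioned above):
# from transformers import FSMTForConditionalGeneration, FSMTTokenizer
# model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
# tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")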
| 30 |
from maths.prime_factors import prime_factors
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
a :Dict = F'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCAmelCase_ )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
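
# The function above computes the Liouville function lambda(n): -1 when n has
# an odd number of prime factors counted with multiplicity, +1 otherwise
# (assuming `prime_factors` returns factors with repetition, as in
# TheAlgorithms' maths module). Hypothetical reference helper for comparison:
def liouville_lambda_ref(number: int) -> int:
    count, n, p = 0, number, 2
    while p * p <= n:
        while n % p == 0:
            n //= p
            count += 1
        p += 1
    if n > 1:
        count += 1
    return -1 if count % 2 else 1

assert liouville_lambda_ref(12) == -1  # 12 = 2 * 2 * 3
assert liouville_lambda_ref(36) == 1   # 36 = 2 * 2 * 3 * 3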
| 94 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : Optional[int] = "table-transformer"
a : List[Any] = ["past_key_values"]
a : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self, __magic_name__=True, __magic_name__=None, __magic_name__=3, __magic_name__=100, __magic_name__=6, __magic_name__=2048, __magic_name__=8, __magic_name__=6, __magic_name__=2048, __magic_name__=8, __magic_name__=0.0, __magic_name__=0.0, __magic_name__=True, __magic_name__="relu", __magic_name__=256, __magic_name__=0.1, __magic_name__=0.0, __magic_name__=0.0, __magic_name__=0.02, __magic_name__=1.0, __magic_name__=False, __magic_name__="sine", __magic_name__="resnet50", __magic_name__=True, __magic_name__=False, __magic_name__=1, __magic_name__=5, __magic_name__=2, __magic_name__=1, __magic_name__=1, __magic_name__=5, __magic_name__=2, __magic_name__=0.1, **__magic_name__, ) -> Tuple:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase__ : List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__magic_name__, __magic_name__ ):
UpperCamelCase__ : str = backbone_config.get('''model_type''' )
UpperCamelCase__ : str = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ : List[Any] = config_class.from_dict(__magic_name__ )
# set timm attributes to None
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : List[Any] = None, None, None
UpperCamelCase__ : Optional[Any] = use_timm_backbone
UpperCamelCase__ : Union[str, Any] = backbone_config
UpperCamelCase__ : Any = num_channels
UpperCamelCase__ : str = num_queries
UpperCamelCase__ : List[str] = d_model
UpperCamelCase__ : Optional[Any] = encoder_ffn_dim
UpperCamelCase__ : Union[str, Any] = encoder_layers
UpperCamelCase__ : List[str] = encoder_attention_heads
UpperCamelCase__ : Dict = decoder_ffn_dim
UpperCamelCase__ : List[str] = decoder_layers
UpperCamelCase__ : Optional[Any] = decoder_attention_heads
UpperCamelCase__ : Optional[Any] = dropout
UpperCamelCase__ : Union[str, Any] = attention_dropout
UpperCamelCase__ : Tuple = activation_dropout
UpperCamelCase__ : Optional[int] = activation_function
UpperCamelCase__ : List[str] = init_std
UpperCamelCase__ : str = init_xavier_std
UpperCamelCase__ : Optional[Any] = encoder_layerdrop
UpperCamelCase__ : Tuple = decoder_layerdrop
UpperCamelCase__ : int = encoder_layers
UpperCamelCase__ : Any = auxiliary_loss
UpperCamelCase__ : int = position_embedding_type
UpperCamelCase__ : Optional[int] = backbone
UpperCamelCase__ : Tuple = use_pretrained_backbone
UpperCamelCase__ : Any = dilation
# Hungarian matcher
UpperCamelCase__ : List[str] = class_cost
UpperCamelCase__ : List[Any] = bbox_cost
UpperCamelCase__ : Union[str, Any] = giou_cost
# Loss coefficients
UpperCamelCase__ : Dict = mask_loss_coefficient
UpperCamelCase__ : List[str] = dice_loss_coefficient
UpperCamelCase__ : List[str] = bbox_loss_coefficient
UpperCamelCase__ : Tuple = giou_loss_coefficient
UpperCamelCase__ : List[str] = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__, **__magic_name__ )
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return self.d_model
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : List[str] = version.parse("1.11" )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
"""simple docstring"""
return 1E-5
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return 12
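
# Hedged usage sketch (assumes `transformers` is installed): the attribute_map
# above aliases hidden_size -> d_model and num_attention_heads ->
# encoder_attention_heads, so both names read the same value.
from transformers import TableTransformerConfig

cfg = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
assert cfg.hidden_size == 128
assert cfg.num_attention_heads == 4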
| 247 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self, __magic_name__ = 16, __magic_name__ = 88, __magic_name__ = None, __magic_name__ = 1, __magic_name__ = 0.0, __magic_name__ = 32, __magic_name__ = None, __magic_name__ = False, __magic_name__ = None, __magic_name__ = None, __magic_name__ = "geglu", __magic_name__ = None, ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase__ : str = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__, attention_head_dim=__magic_name__, in_channels=__magic_name__, num_layers=__magic_name__, dropout=__magic_name__, norm_num_groups=__magic_name__, cross_attention_dim=__magic_name__, attention_bias=__magic_name__, sample_size=__magic_name__, num_vector_embeds=__magic_name__, activation_fn=__magic_name__, num_embeds_ada_norm=__magic_name__, )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCamelCase__ : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCamelCase__ : Optional[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCamelCase__ : int = [1, 0]
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__ = True, ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = hidden_states
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : int = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCamelCase__ : List[Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCamelCase__ : List[str] = self.transformer_index_for_condition[i]
UpperCamelCase__ : List[str] = self.transformers[transformer_index](
__magic_name__, encoder_hidden_states=__magic_name__, timestep=__magic_name__, cross_attention_kwargs=__magic_name__, return_dict=__magic_name__, )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCamelCase__ : List[str] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCamelCase__ : int = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
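
# The forward pass above blends the two transformers' residuals (encoded state
# minus input) with `mix_ratio` and re-adds the input. A minimal numeric sketch
# of that blend (illustrative tensors only):
import torch

res0, res1 = torch.ones(2, 3), torch.zeros(2, 3)  # stand-ins for encoded - input
inp = torch.full((2, 3), 0.5)
mix_ratio = 0.5
out = res0 * mix_ratio + res1 * (1 - mix_ratio) + inp
assert torch.allclose(out, torch.full((2, 3), 1.0))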
| 247 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _a ( lowerCamelCase: int ) -> str:
'''simple docstring'''
return (data["data"], data["target"])
def _a ( lowerCamelCase: List[str] , lowerCamelCase: int ) -> List[str]:
'''simple docstring'''
__A = XGBClassifier()
classifier.fit(lowerCamelCase , lowerCamelCase )
return classifier
def _a ( ) -> Optional[Any]:
'''simple docstring'''
__A = load_iris()
__A = data_handling(lowerCamelCase )
__A = train_test_split(
lowerCamelCase , lowerCamelCase , test_size=0.25 )
__A = iris["target_names"]
# Create an XGBoost Classifier from the training data
__A = xgboost(lowerCamelCase , lowerCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , display_labels=lowerCamelCase , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
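
# Follow-up sketch (hypothetical variable names from main() above): predicted
# class indices map into iris["target_names"].
# preds = classifier.predict(x_test)      # e.g. array([0, 2, 1, ...])
# labels = iris["target_names"][preds]    # e.g. ['setosa', 'virginica', ...]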
| 117 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( _a , _a , unittest.TestCase ):
_A : Tuple = AutoencoderKL
_A : Union[str, Any] = '''sample'''
_A : int = 1E-2
@property
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Tuple = 4
SCREAMING_SNAKE_CASE:Dict = 3
SCREAMING_SNAKE_CASE:str = (32, 32)
SCREAMING_SNAKE_CASE:List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
return {"sample": image}
@property
def __UpperCamelCase ( self : Any ):
return (3, 32, 32)
@property
def __UpperCamelCase ( self : int ):
return (3, 32, 32)
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:Optional[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
SCREAMING_SNAKE_CASE:List[Any] = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self : int ):
pass
def __UpperCamelCase ( self : Tuple ):
pass
@unittest.skipIf(torch_device == "mps" ,"Gradient checkpointing skipped on MPS" )
def __UpperCamelCase ( self : str ):
# enable deterministic behavior for gradient checkpointing
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE:Optional[Any] = self.model_class(**SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE:str = model(**SCREAMING_SNAKE_CASE__ ).sample
        # run the backwards pass on the model. For simplicity we backprop on the
        # mean difference to the labels rather than a full training loss
model.zero_grad()
SCREAMING_SNAKE_CASE:str = torch.randn_like(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE:List[Any] = self.model_class(**SCREAMING_SNAKE_CASE__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE:Optional[int] = model_a(**SCREAMING_SNAKE_CASE__ ).sample
        # run the backwards pass on the model. For simplicity we backprop on the
        # mean difference to the labels rather than a full training loss
model_a.zero_grad()
SCREAMING_SNAKE_CASE:List[Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
SCREAMING_SNAKE_CASE:Dict = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE:Tuple = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(loading_info["missing_keys"] ) ,0 )
model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
SCREAMING_SNAKE_CASE:int = model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE:str = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE:Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE:Any = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
SCREAMING_SNAKE_CASE:Optional[int] = image.to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,sample_posterior=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ).sample
SCREAMING_SNAKE_CASE:str = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE:List[Any] = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
SCREAMING_SNAKE_CASE:Dict = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ):
        return F'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def __UpperCamelCase ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict=0 ,SCREAMING_SNAKE_CASE__ : Any=(4, 3, 512, 512) ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
SCREAMING_SNAKE_CASE:str = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE:List[Any] = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ) ).to(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return image
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Dict="CompVis/stable-diffusion-v1-4" ,SCREAMING_SNAKE_CASE__ : int=False ):
SCREAMING_SNAKE_CASE:Union[str, Any] = "fp16" if fpaa else None
SCREAMING_SNAKE_CASE:Optional[Any] = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE:Union[str, Any] = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE__ ,subfolder="vae" ,torch_dtype=SCREAMING_SNAKE_CASE__ ,revision=SCREAMING_SNAKE_CASE__ ,)
model.to(SCREAMING_SNAKE_CASE__ ).eval()
return model
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE__ )
return torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ):
SCREAMING_SNAKE_CASE:Any = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE:Tuple = self.get_sd_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = self.get_generator(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:List[str] = model(SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,sample_posterior=SCREAMING_SNAKE_CASE__ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE:List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ):
SCREAMING_SNAKE_CASE:Tuple = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = self.get_sd_image(SCREAMING_SNAKE_CASE__ ,fpaa=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = self.get_generator(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,sample_posterior=SCREAMING_SNAKE_CASE__ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE:int = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE__ )
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:List[str] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_sd_image(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE:int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE:str = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
SCREAMING_SNAKE_CASE:Any = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE:List[str] = self.get_sd_image(SCREAMING_SNAKE_CASE__ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Optional[Any] = model.decode(SCREAMING_SNAKE_CASE__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE:List[str] = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE:Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:int = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = self.get_sd_image(SCREAMING_SNAKE_CASE__ ,shape=(3, 4, 64, 64) ,fpaa=SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Optional[Any] = model.decode(SCREAMING_SNAKE_CASE__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
SCREAMING_SNAKE_CASE:Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE:Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="xformers is not required when using PyTorch 2.0." )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:str = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = self.get_sd_image(SCREAMING_SNAKE_CASE__ ,shape=(3, 4, 64, 64) ,fpaa=SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:List[str] = model.decode(SCREAMING_SNAKE_CASE__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE:Optional[Any] = model.decode(SCREAMING_SNAKE_CASE__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="xformers is not required when using PyTorch 2.0." )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
SCREAMING_SNAKE_CASE:List[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE:List[Any] = self.get_sd_image(SCREAMING_SNAKE_CASE__ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE:List[Any] = model.decode(SCREAMING_SNAKE_CASE__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE:int = model.decode(SCREAMING_SNAKE_CASE__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
SCREAMING_SNAKE_CASE:int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE:List[Any] = self.get_sd_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = self.get_generator(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:List[Any] = model.encode(SCREAMING_SNAKE_CASE__ ).latent_dist
SCREAMING_SNAKE_CASE:int = dist.sample(generator=SCREAMING_SNAKE_CASE__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE:List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE:int = torch.tensor(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__ )
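
# Shape arithmetic sketch behind the assertions above: the Stable Diffusion VAE
# downsamples spatially by 8x and uses 4 latent channels, so an image batch
# (B, 3, H, W) encodes to latents (B, 4, H // 8, W // 8).
B, H, W = 4, 512, 512
assert (B, 4, H // 8, W // 8) == (4, 4, 64, 64)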
| 139 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCAmelCase_ : Optional[int] = '''pt'''
elif is_tf_available():
UpperCAmelCase_ : Tuple = '''tf'''
else:
UpperCAmelCase_ : Any = '''jax'''
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : str = ByTaTokenizer
snake_case__ : List[Any] = False
def _A ( self : str ):
super().setUp()
UpperCamelCase :List[Any] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _A ( self : Optional[Any] ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def _A ( self : Optional[Any] , **__lowerCamelCase : List[Any] ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _A ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=20 , __lowerCamelCase : List[Any]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
UpperCamelCase :str = []
for i in range(len(__lowerCamelCase ) ):
try:
UpperCamelCase :Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCamelCase :int = list(filter(lambda __lowerCamelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __lowerCamelCase ) )
UpperCamelCase :Union[str, Any] = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCamelCase ) , __lowerCamelCase ) )
if max_length is not None and len(__lowerCamelCase ) > max_length:
UpperCamelCase :str = toks[:max_length]
if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
while len(__lowerCamelCase ) < min_length:
UpperCamelCase :str = toks + toks
# toks_str = [t[1] for t in toks]
UpperCamelCase :int = [t[0] for t in toks]
# Ensure consistency
UpperCamelCase :Optional[Any] = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
if " " not in output_txt and len(__lowerCamelCase ) > 1:
UpperCamelCase :Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase )
)
if with_prefix_space:
UpperCamelCase :Union[str, Any] = """ """ + output_txt
UpperCamelCase :Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
return output_txt, output_ids
def _A ( self : Union[str, Any] ):
UpperCamelCase :List[str] = self.ta_base_tokenizer
UpperCamelCase :Dict = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
UpperCamelCase :Union[str, Any] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def _A ( self : str ):
UpperCamelCase :Dict = self.ta_base_tokenizer
UpperCamelCase :Dict = """Unicode €."""
UpperCamelCase :List[str] = tokenizer(__lowerCamelCase )
UpperCamelCase :str = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , __lowerCamelCase )
# decoding
UpperCamelCase :Dict = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , """Unicode €.</s>""" )
UpperCamelCase :Dict = tokenizer("""e è é ê ë""" )
UpperCamelCase :List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , __lowerCamelCase )
# decoding
UpperCamelCase :int = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def _A ( self : Optional[int] ):
UpperCamelCase :Optional[Any] = self.ta_base_tokenizer
UpperCamelCase :Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
UpperCamelCase :Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCamelCase :Tuple = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
if FRAMEWORK != "jax":
UpperCamelCase :Optional[int] = list(batch.input_ids.numpy()[0] )
else:
UpperCamelCase :List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _A ( self : Optional[Any] ):
UpperCamelCase :Optional[Any] = self.ta_base_tokenizer
UpperCamelCase :int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase :Optional[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , __lowerCamelCase )
self.assertIn("""attention_mask""" , __lowerCamelCase )
self.assertNotIn("""decoder_input_ids""" , __lowerCamelCase )
self.assertNotIn("""decoder_attention_mask""" , __lowerCamelCase )
def _A ( self : Any ):
UpperCamelCase :Tuple = self.ta_base_tokenizer
UpperCamelCase :Any = [
"""Summary of the text.""",
"""Another summary.""",
]
UpperCamelCase :Dict = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="""max_length""" , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _A ( self : List[Any] ):
UpperCamelCase :Any = self.ta_base_tokenizer
UpperCamelCase :Optional[Any] = ["""A long paragraph for summarization. </s>"""]
UpperCamelCase :Optional[int] = ["""Summary of the text. </s>"""]
# fmt: off
UpperCamelCase :Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCamelCase :Union[str, Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCamelCase :Optional[int] = tokenizer(__lowerCamelCase , text_target=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , batch["""input_ids"""][0] )
self.assertEqual(__lowerCamelCase , batch["""labels"""][0] )
def _A ( self : Union[str, Any] ):
# safety check on max_len default value so we are sure the test works
UpperCamelCase :Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase :List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase :Optional[Any] = tempfile.mkdtemp()
UpperCamelCase :Tuple = """ He is very happy, UNwant\u00E9d,running"""
UpperCamelCase :str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase )
UpperCamelCase :List[Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
UpperCamelCase :Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase :Dict = tempfile.mkdtemp()
UpperCamelCase :List[str] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
UpperCamelCase :str = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
UpperCamelCase :Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
UpperCamelCase :int = tokenizer.__class__.from_pretrained(__lowerCamelCase )
UpperCamelCase :List[Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase :str = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
UpperCamelCase :str = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
UpperCamelCase :Dict = json.load(__lowerCamelCase )
UpperCamelCase :int = [F"""<extra_id_{i}>""" for i in range(125 )]
UpperCamelCase :Tuple = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
UpperCamelCase :int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__lowerCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase :List[str] = tokenizer_class.from_pretrained(
__lowerCamelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase :Any = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__lowerCamelCase )]
UpperCamelCase :Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def _A ( self : Any ):
UpperCamelCase :Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
UpperCamelCase :Tuple = tokenizer_class.from_pretrained(__lowerCamelCase )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def _A ( self : Any ):
pass
def _A ( self : str ):
pass
def _A ( self : Dict ):
pass
def _A ( self : Any ):
pass
def _A ( self : str ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
UpperCamelCase :Optional[int] = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase :List[Any] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
UpperCamelCase :Optional[Any] = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase :List[Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
UpperCamelCase :Dict = 0
UpperCamelCase :int = tokenizer.convert_ids_to_tokens(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + """_id""" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + """_id""" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + """_id""" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + """_id""" ) , __lowerCamelCase )
setattr(__lowerCamelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__lowerCamelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , """additional_special_tokens_ids""" ) , [] )
setattr(__lowerCamelCase , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__lowerCamelCase , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__lowerCamelCase , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 62 |
from __future__ import annotations
from collections import deque
class _SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , __lowerCamelCase : list[str] ):
UpperCamelCase :list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(__lowerCamelCase )
self.set_fail_transitions()
def _A ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : str ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _A ( self : int , __lowerCamelCase : str ):
UpperCamelCase :List[str] = 0
for character in keyword:
UpperCamelCase :List[Any] = self.find_next_state(__lowerCamelCase , __lowerCamelCase )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCamelCase :Optional[int] = len(self.adlist ) - 1
else:
UpperCamelCase :Union[str, Any] = next_state
self.adlist[current_state]["output"].append(__lowerCamelCase )
def _A ( self : List[str] ):
UpperCamelCase :deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(__lowerCamelCase )
UpperCamelCase :Tuple = 0
while q:
UpperCamelCase :Union[str, Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__lowerCamelCase )
UpperCamelCase :Optional[int] = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(__lowerCamelCase , self.adlist[child]["""value"""] ) is None
and state != 0
):
UpperCamelCase :List[Any] = self.adlist[state]["""fail_state"""]
UpperCamelCase :List[str] = self.find_next_state(
__lowerCamelCase , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
UpperCamelCase :Any = 0
UpperCamelCase :Tuple = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def _A ( self : Union[str, Any] , __lowerCamelCase : str ):
UpperCamelCase :dict = {} # returns a dict with keywords and list of its occurrences
UpperCamelCase :Union[str, Any] = 0
for i in range(len(__lowerCamelCase ) ):
while (
self.find_next_state(__lowerCamelCase , string[i] ) is None
and current_state != 0
):
UpperCamelCase :List[Any] = self.adlist[current_state]["""fail_state"""]
UpperCamelCase :Optional[Any] = self.find_next_state(__lowerCamelCase , string[i] )
if next_state is None:
UpperCamelCase :Dict = 0
else:
UpperCamelCase :List[str] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCamelCase :Optional[int] = []
result[key].append(i - len(__lowerCamelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
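
# The final search method above returns {keyword: [match positions]}. A hedged
# reference sketch using a naive multi-pattern scan (hypothetical helper, not
# part of the class) that should agree with the automaton:
def naive_multisearch(text: str, keywords: list[str]) -> dict[str, list[int]]:
    result: dict[str, list[int]] = {}
    for kw in keywords:
        start = 0
        while (pos := text.find(kw, start)) != -1:
            result.setdefault(kw, []).append(pos)
            start = pos + 1
    return result

assert naive_multisearch("whatever, err ... , wherever", ["what", "hat", "ver", "er"]) == {
    "what": [0],
    "hat": [1],
    "ver": [5, 25],
    "er": [6, 10, 22, 26],
}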
| 62 | 1 |
'''simple docstring'''
from __future__ import annotations
def snake_case ( UpperCAmelCase )-> float:
"""simple docstring"""
__A = 0.00
__A = 0
for resistor in resistors:
if resistor <= 0:
__A = f'Resistor at index {index} has a negative or zero value!'
raise ValueError(UpperCAmelCase )
first_sum += 1 / float(UpperCAmelCase )
index += 1
return 1 / first_sum
def snake_case ( UpperCAmelCase )-> float:
"""simple docstring"""
__A = 0.00
__A = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__A = f'Resistor at index {index} has a negative value!'
raise ValueError(UpperCAmelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
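
# Hedged usage sketch of the two formulas above, with clean names (the helpers
# are obfuscated as `snake_case` in this file):
#   parallel: 1 / R_total = sum(1 / R_i)    series: R_total = sum(R_i)
def parallel_resistance(resistors: list[float]) -> float:
    return 1 / sum(1 / r for r in resistors)

def series_resistance(resistors: list[float]) -> float:
    return sum(resistors)

assert parallel_resistance([2, 4, 4]) == 1.0  # 1 / (0.5 + 0.25 + 0.25)
assert series_resistance([2, 4, 4]) == 10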
| 161 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
__A = torch.exp(UpperCAmelCase )
__A = torch.sum(UpperCAmelCase , dim=1 ) # sum of exp(x_i)
__A = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(UpperCAmelCase ) - B / A
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :int ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__A = config.output_attentions
__A = config.output_hidden_states
__A = nn.ModuleList([BertLayer(_A ) for _ in range(config.num_hidden_layers )] )
__A = nn.ModuleList([BertHighway(_A ) for _ in range(config.num_hidden_layers )] )
__A = [-1 for _ in range(config.num_hidden_layers )]
def lowercase_ ( self :Any , _A :List[Any] ) -> Tuple:
'''simple docstring'''
if (type(_A ) is float) or (type(_A ) is int):
for i in range(len(self.early_exit_entropy ) ):
__A = x
else:
__A = x
def lowercase_ ( self :Optional[Any] , _A :List[str] ) -> Dict:
'''simple docstring'''
__A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowercase_ ( self :List[Any] , _A :Tuple , _A :Tuple=None , _A :int=None , _A :List[Any]=None , _A :str=None , ) -> Tuple:
'''simple docstring'''
__A = ()
__A = ()
__A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = layer_module(
_A , _A , head_mask[i] , _A , _A )
__A = layer_outputs[0]
if self.output_attentions:
__A = all_attentions + (layer_outputs[1],)
__A = (hidden_states,)
if self.output_hidden_states:
__A = current_outputs + (all_hidden_states,)
if self.output_attentions:
__A = current_outputs + (all_attentions,)
__A = self.highway[i](_A )
# logits, pooled_output
if not self.training:
__A = highway_exit[0]
__A = entropy(_A )
__A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_A , i + 1 )
else:
__A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = (hidden_states,)
if self.output_hidden_states:
__A = outputs + (all_hidden_states,)
if self.output_attentions:
__A = outputs + (all_attentions,)
__A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Tuple , _A :List[str] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config
__A = BertEmbeddings(_A )
__A = DeeBertEncoder(_A )
__A = BertPooler(_A )
self.init_weights()
def lowercase_ ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowercase_ ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def lowercase_ ( self :Tuple , _A :Tuple ) -> Union[str, Any]:
'''simple docstring'''
__A = value
def lowercase_ ( self :int , _A :int ) -> Tuple:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_A )
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :int=None , _A :List[Any]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Any=None , _A :List[str]=None , _A :Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__A = input_ids.size()
elif inputs_embeds is not None:
__A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__A = torch.ones(_A , device=_A )
if encoder_attention_mask is None:
__A = torch.ones(_A , device=_A )
if token_type_ids is None:
__A = torch.zeros(_A , dtype=torch.long , device=_A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__A = self.get_extended_attention_mask(_A , _A , _A )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__A = encoder_attention_mask[:, None, None, :]
__A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__A = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__A = self.get_head_mask(_A , self.config.num_hidden_layers )
__A = self.embeddings(
input_ids=_A , position_ids=_A , token_type_ids=_A , inputs_embeds=_A )
__A = self.encoder(
_A , attention_mask=_A , head_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A = encoder_outputs[0]
__A = self.pooler(_A )
__A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Optional[Any] , _A :str , _A :List[str] ) -> Optional[int]:
'''simple docstring'''
__A = message
__A = exit_layer # start from 1!
class UpperCamelCase__ ( nn.Module):
def __init__( self :Any , _A :Dict ) -> Tuple:
'''simple docstring'''
super().__init__()
__A = BertPooler(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , config.num_labels )
def lowercase_ ( self :List[Any] , _A :Optional[Any] ) -> int:
'''simple docstring'''
__A = encoder_outputs[0]
__A = self.pooler(_A )
# "return" pooler_output
# BertModel
__A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__A = bmodel_output[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :str , _A :Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config.num_labels
__A = config.num_hidden_layers
__A = DeeBertModel(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_A )
def lowercase_ ( self :Tuple , _A :str=None , _A :Optional[int]=None , _A :Any=None , _A :str=None , _A :int=None , _A :Tuple=None , _A :Any=None , _A :List[str]=-1 , _A :Optional[Any]=False , ) -> List[str]:
'''simple docstring'''
__A = self.num_layers
try:
__A = self.bert(
_A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__A = outputs[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
__A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__A = e.message
__A = e.exit_layer
__A = outputs[0]
if not self.training:
__A = entropy(_A )
__A = []
__A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__A = []
for highway_exit in outputs[-1]:
__A = highway_exit[0]
if not self.training:
highway_logits_all.append(_A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_A )
if train_highway:
__A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__A = (loss,) + outputs
if not self.training:
__A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
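# Background sketch (illustrative, not part of the original file): at inference time DeeBERT exits
# at the first highway whose prediction entropy falls below a threshold. A minimal `entropy`
# helper consistent with the call above could look like:
#
#     def entropy(x):
#         exp_x = torch.exp(x)
#         a = torch.sum(exp_x, dim=-1)
#         b = torch.sum(x * exp_x, dim=-1)
#         return torch.log(a) - b / a  # H(softmax(x)) = log(a) - b / a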
| 161 | 1 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string with the a1z26 cipher (a=1, ..., z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of a1z26 values back into a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 144 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
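# Pattern note (illustrative): the try/except blocks above let the subpackage import cleanly even
# when optional dependencies are missing; the dummy objects raise an informative error only when used.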
| 144 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
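# For instance, solution(15) == 26, since 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
assert solution(15) == 26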
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
| 53 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
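# Sanity check (illustrative): the standard normal density peaks at 1 / sqrt(2 * pi) ≈ 0.3989.
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12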
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=[1, 2, 1],__SCREAMING_SNAKE_CASE=[2, 2, 4],__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=2.0,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=10,__SCREAMING_SNAKE_CASE=8,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = patch_norm
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = is_training
__lowerCAmelCase = scope
__lowerCAmelCase = use_labels
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = encoder_stride
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size],self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,patch_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = SwinvaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
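        # Each Swin stage merges 2x2 neighboring patches, so the token count shrinks by 4x and the
        # channel width doubles per merge, hence the 4 ** (n - 1) and 2 ** (n - 1) factors above.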
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = SwinvaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : int =(
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a : int =(
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
a : str =False
a : int =False
a : Optional[int] =False
a : Any =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SwinvaModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,embed_dim=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE,nn.Linear ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1],__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
__lowerCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = config.window_size**2
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_heads[0], window_size_squared, window_size_squared],)
__lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester,"""num_hidden_states_types""" ):
__lowerCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__lowerCAmelCase = 2
self.assertEqual(out_len + added_hidden_states,len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ),[self.model_tester.num_heads[0], window_size_squared, window_size_squared],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = getattr(
self.model_tester,"""expected_num_hidden_layers""",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
__lowerCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reshaped_hidden_states[0].shape
__lowerCAmelCase = (
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,(padded_height, padded_width) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowerCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__SCREAMING_SNAKE_CASE,atol=1e-4 ) )
| 46 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : str =KandinskyVaaInpaintPipeline
a : int =["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a : str =[
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a : Optional[int] =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Dict =False
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 1_00
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = DDIMScheduler(
num_train_timesteps=10_00,beta_schedule="""linear""",beta_start=0.0_0085,beta_end=0.012,clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,steps_offset=1,prediction_type="""epsilon""",thresholding=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # leave a quadrant to be inpainted (slice reconstructed; assumed)
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((7_68, 7_68), dtype=np.float32)
        mask[:2_50, 2_50:-2_50] = 0  # zero the region where the hat is painted (slice reconstructed; assumed)
__lowerCAmelCase = """a hat"""
__lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""",torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple()
__lowerCAmelCase = pipeline(
image=__SCREAMING_SNAKE_CASE,mask_image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,output_type="""np""",)
__lowerCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 46 | 1 |
def twos_complement(number: int) -> str:
    """Return the two's-complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable, standard pinhole cameras."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # Per-pixel (column, row) integer coordinates, flattened row-major: [height * width, 2].
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='trunc'),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by tan(fov / 2) for image-plane offsets.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
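# Usage sketch (illustrative): pan 20 cameras around the origin and read back per-pixel rays.
#
#     cameras = create_pan_cameras(size=64)
#     rays = cameras.camera_rays  # [1, 20 * 64 * 64, 2, 3]: an (origin, direction) pair per pixel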
| 90 | 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase : Tuple = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 5_0257 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 768 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "gelu_new" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 1e-5 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , ):
super().__init__()
UpperCamelCase : Optional[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
UpperCamelCase : List[Any] = prefix_inner_dim
UpperCamelCase : Optional[int] = prefix_hidden_dim
UpperCamelCase : List[Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
UpperCamelCase : Any = (
nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
UpperCamelCase : Tuple = GPTaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , n_positions=SCREAMING_SNAKE_CASE_ , n_embd=SCREAMING_SNAKE_CASE_ , n_layer=SCREAMING_SNAKE_CASE_ , n_head=SCREAMING_SNAKE_CASE_ , n_inner=SCREAMING_SNAKE_CASE_ , activation_function=SCREAMING_SNAKE_CASE_ , resid_pdrop=SCREAMING_SNAKE_CASE_ , embd_pdrop=SCREAMING_SNAKE_CASE_ , attn_pdrop=SCREAMING_SNAKE_CASE_ , layer_norm_epsilon=SCREAMING_SNAKE_CASE_ , initializer_range=SCREAMING_SNAKE_CASE_ , scale_attn_weights=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE_ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : int = GPTaLMHeadModel(SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
UpperCamelCase : Union[str, Any] = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = self.encode_prefix(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.decode_prefix(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
UpperCamelCase : List[str] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
UpperCamelCase : List[str] = torch.cat((dummy_token, input_ids) , dim=1 )
UpperCamelCase : List[Any] = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return torch.zeros(SCREAMING_SNAKE_CASE_ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
return self.encode_prefix(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = torch.split(SCREAMING_SNAKE_CASE_ , 1 , dim=0 )
UpperCamelCase : Tuple = []
UpperCamelCase : Any = []
for feature in features:
UpperCamelCase : Tuple = self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE_ ) ) # back to the clip feature
# Only support beam search for now
UpperCamelCase , UpperCamelCase : List[Any] = self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
UpperCamelCase : List[Any] = torch.stack(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = torch.stack(SCREAMING_SNAKE_CASE_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def a_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = 5 , SCREAMING_SNAKE_CASE_ = 67 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ):
UpperCamelCase : Union[str, Any] = eos_token_id
UpperCamelCase : str = None
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Optional[Any] = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.int )
UpperCamelCase : str = torch.zeros(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.bool )
if input_embeds is not None:
UpperCamelCase : Optional[int] = input_embeds
else:
UpperCamelCase : Any = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = outputs.logits
UpperCamelCase : List[str] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
UpperCamelCase : List[Any] = logits.softmax(-1 ).log()
if scores is None:
UpperCamelCase , UpperCamelCase : Any = logits.topk(SCREAMING_SNAKE_CASE_ , -1 )
UpperCamelCase : Union[str, Any] = generated.expand(SCREAMING_SNAKE_CASE_ , *generated.shape[1:] )
UpperCamelCase , UpperCamelCase : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
UpperCamelCase : Optional[Any] = next_tokens
else:
UpperCamelCase : Optional[Any] = tokens.expand(SCREAMING_SNAKE_CASE_ , *tokens.shape[1:] )
UpperCamelCase : Optional[Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
UpperCamelCase : Union[str, Any] = -float(np.inf )
UpperCamelCase : Union[str, Any] = 0
UpperCamelCase : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
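                    # Length-normalize the running log-prob sums so longer beams are not penalized: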
UpperCamelCase : str = scores_sum / seq_lengths[:, None]
UpperCamelCase , UpperCamelCase : List[Any] = scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE_ , -1 )
UpperCamelCase : str = next_tokens // scores_sum.shape[1]
UpperCamelCase : Union[str, Any] = seq_lengths[next_tokens_source]
UpperCamelCase : Dict = next_tokens % scores_sum.shape[1]
UpperCamelCase : Optional[int] = next_tokens.unsqueeze(1 )
UpperCamelCase : Tuple = tokens[next_tokens_source]
UpperCamelCase : str = torch.cat((tokens, next_tokens) , dim=1 )
UpperCamelCase : Optional[Any] = generated[next_tokens_source]
UpperCamelCase : Optional[int] = scores_sum_average * seq_lengths
UpperCamelCase : Tuple = is_stopped[next_tokens_source]
UpperCamelCase : int = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
UpperCamelCase : int = torch.cat((generated, next_token_embed) , dim=1 )
UpperCamelCase : Any = is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE_ ).squeeze()
if is_stopped.all():
break
UpperCamelCase : Optional[Any] = scores / seq_lengths
UpperCamelCase : Tuple = scores.argsort(descending=SCREAMING_SNAKE_CASE_ )
# tokens tensors are already padded to max_seq_length
UpperCamelCase : str = [tokens[i] for i in order]
UpperCamelCase : List[Any] = torch.stack(SCREAMING_SNAKE_CASE_ , dim=0 )
UpperCamelCase : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
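# Flow sketch (illustrative summary): image features are mapped through encode_prefix/decode_prefix
# into "prefix" token embeddings and concatenated in front of the caption embeddings, so the GPT-2
# head conditions on the image purely through its input sequence (no cross-attention); captions are
# then decoded with the length-normalized beam search above.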
| 27 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( _UpperCAmelCase ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1 , ):
UpperCamelCase : Tuple = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : Optional[Any] = seq_length
UpperCamelCase : int = is_training
UpperCamelCase : Union[str, Any] = use_input_mask
UpperCamelCase : Union[str, Any] = use_token_type_ids
UpperCamelCase : Dict = use_labels
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Any = num_attention_heads
UpperCamelCase : int = intermediate_size
UpperCamelCase : str = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : Optional[Any] = type_vocab_size
UpperCamelCase : int = type_sequence_label_size
UpperCamelCase : Dict = initializer_range
UpperCamelCase : Dict = num_labels
UpperCamelCase : Tuple = num_choices
UpperCamelCase : Optional[int] = scope
UpperCamelCase : List[Any] = q_groups
UpperCamelCase : Tuple = k_groups
UpperCamelCase : Any = v_groups
UpperCamelCase : List[str] = post_attention_groups
UpperCamelCase : Tuple = intermediate_groups
UpperCamelCase : int = output_groups
def a_ ( self ):
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Tuple = None
if self.use_input_mask:
UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Optional[int] = None
UpperCamelCase : List[Any] = None
UpperCamelCase : Dict = None
if self.use_labels:
UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = SqueezeBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : str = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = self.num_labels
UpperCamelCase : Optional[Any] = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = self.num_labels
UpperCamelCase : str = SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = self.num_choices
UpperCamelCase : Tuple = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
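        # Multiple-choice heads score each (context, choice) pair, so inputs are tiled across a new choices axis.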
UpperCamelCase : Tuple = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self ):
UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = config_and_inputs
UpperCamelCase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowercase : Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase : Dict = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Dict = False
lowercase : str = True
lowercase : str = False
def a_ ( self ):
UpperCamelCase : Any = SqueezeBertModelTester(self )
UpperCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def a_ ( self ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[Any] = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def a_ ( self ):
UpperCamelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
UpperCamelCase : Dict = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : Optional[Any] = torch.Size((1, 3) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 27 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
_a = AutoTokenizer.from_pretrained("google/mt5-small" )
_a = tokenizer("Hello there" , return_tensors="np" ).input_ids
_a = tokenizer("Hi I am" , return_tensors="np" ).input_ids
_a = shift_tokens_right(__a , model.config.pad_token_id , model.config.decoder_start_token_id )
_a = model(__a , decoder_input_ids=__a ).logits
_a = optax.softmax_cross_entropy(__a , onehot(__a , logits.shape[-1] ) ).mean()
_a = -(labels.shape[-1] * loss.item())
_a = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
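        # The assertion converts the mean per-token cross-entropy back into a summed sequence
        # log-likelihood, -(num_tokens * mean_loss), and compares it against a precomputed reference score.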
| 63 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class MMBTConfig:
    """Stores a multimodal configuration on top of an existing text config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
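# Usage sketch (hypothetical; the class name above is reconstructed, and BertConfig is only an
# example wrappee): expose `modal_hidden_size` next to the text model's own fields, e.g.
#     config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)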
| 63 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCAmelCase_ ( a__ ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = 0
UpperCamelCase_ : Dict = False
UpperCamelCase_ : str = 3.0
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=SCREAMING_SNAKE_CASE_ ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def _snake_case ( self : Optional[Any] ) -> str:
'''simple docstring'''
A: str = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
A: Optional[int] = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
A: Union[str, Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , SCREAMING_SNAKE_CASE_ )
@require_multi_gpu
def _snake_case ( self : Optional[Any] ) -> str:
'''simple docstring'''
A: Tuple = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
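# What this block exercises (illustrative summary): KwargsHandler dataclasses expose `to_kwargs()`,
# which returns only fields that differ from their defaults, and Accelerator forwards those values
# to the wrapped object (GradScaler or DistributedDataParallel) when it is created.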
| 370 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor that turns raw speech into log-mel
    filter-bank features (Kaldi-style fbank via TorchAudio).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Get mel-filter bank features using TorchAudio. TorchAudio expects 16-bit signed
        integer scaling, so the waveform must not be normalized before feature extraction."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0) -> np.ndarray:
        """Apply utterance-level cepstral mean and (optionally) variance normalization."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw speech into padded log-mel filter banks."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
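# Illustrative usage demo (not part of the original module): featurize one second of a
# synthetic 440 Hz tone. Real inputs would come from an audio loader such as torchaudio.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    dummy_waveform = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
    inputs = extractor(dummy_waveform, sampling_rate=16000, padding=True, return_tensors="np")
    print(inputs["input_features"].shape)  # (1, num_frames, 80)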
| 334 | 0 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given string of bits using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (Elias gamma coded) to the compressed bit string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file as bytes, adding a stop-bit padding marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it, and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
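# Example invocation (illustrative; the module and file names are placeholders):
#
#     python lempel_ziv.py sample.txt sample.lz
#
# or, equivalently, from Python: compress("sample.txt", "sample.lz").
# Note that this module only implements compression; a matching decompressor has to
# rebuild the same lexicon to invert compress_data.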
| 31 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
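# Illustrative one-off use of the same checkpoint outside the test harness (left as a
# comment sketch since it downloads the model):
#
#     tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#     batch = tokenizer(["PG&E scheduled blackouts amid high winds."], return_tensors="tf")
#     summary_ids = model.generate(batch.input_ids, num_beams=2)
#     print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))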
| 58 | 0 |
ROMAN = [
(10_00, '''M'''),
(9_00, '''CM'''),
(5_00, '''D'''),
(4_00, '''CD'''),
(1_00, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.
    >>> roman_to_int("XIV")
    14
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.
    >>> int_to_roman(14)
    'XIV'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
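# Quick round-trip sanity check (illustrative):
#
#     assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))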
| 109 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Project Euler 174: count the hollow square laminae that can be formed with up to
    t_limit tiles in exactly 1 to n_limit distinct ways.
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"""{solution() = }""")
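# Worked example (illustrative): a 7x7 lamina with a 3x3 hole uses 7*7 - 3*3 = 40 tiles,
# so count[40] is incremented once for this shape; the final sum counts the tile totals
# that are achievable in between 1 and n_limit distinct ways.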
| 109 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for the given split and save them to `output_dir`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
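# Illustrative invocation (paths and the checkpoint name are placeholders; all flags
# shown exist in the dataclasses above or in Seq2SeqTrainingArguments):
#
#     python finetune_trainer.py \
#         --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#         --data_dir ./xsum --output_dir ./output \
#         --do_train --do_eval --predict_with_generate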
| 125 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` to obtain the inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
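# Illustrative usage sketch, following the diffusers community-pipeline convention;
# treat the checkpoint names and the "text_inpainting" custom_pipeline id as assumptions:
#
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting", custom_pipeline="text_inpainting",
#         segmentation_model=clipseg_model, segmentation_processor=clipseg_processor,
#     )
#     result = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]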
| 125 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
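# Illustrative usage (Speech2Text2ForCausalLM is the decoder-only model class that
# consumes this config in transformers):
#
#     from transformers import Speech2Text2Config, Speech2Text2ForCausalLM
#
#     config = Speech2Text2Config(decoder_layers=4, d_model=128)
#     model = Speech2Text2ForCausalLM(config)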
| 354 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config, optionally pretty-printing it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config file and load its checkpoint onto `device`."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode and decode `x` with the VQGAN, returning the reconstruction."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like `pkg.module.Class` to the object it names."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """Instantiate the object described by a config with a `target` key."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from config, load a state dict, and set eval/GPU modes."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    """Load a (possibly checkpointed) model and return it with its global step."""
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
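# Illustrative usage (checkpoint paths are placeholders):
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device, conf_path="vqgan.yaml", ckpt_path="vqgan.ckpt")
#     reconstruction = reconstruct_with_vqgan(images.to(device), vqgan)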
| 112 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
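# In the upstream repository a script like this runs on a scheduled GitHub Actions job;
# to exercise it manually (assuming this file is saved as stale.py):
#
#     GITHUB_TOKEN=<personal-access-token> python stale.py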
| 71 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the euclidean distance between two points
    >>> euclidean_distance([0, 0], [3, 4])
    5.0
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
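# Illustrative cross-check against scikit-learn's own estimator (sklearn is already a
# dependency of this script):
#
#     from sklearn.neighbors import KNeighborsClassifier
#
#     sk_knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
#     print(classes[sk_knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])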
| 311 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def UpperCAmelCase_ (self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE_ )
original_model.to(SCREAMING_SNAKE_CASE_ )
original_model.eval()
UpperCamelCase__ = original_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
UpperCamelCase__ = original_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE_ )
scaled_model.to(SCREAMING_SNAKE_CASE_ )
scaled_model.eval()
UpperCamelCase__ = scaled_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
UpperCamelCase__ = scaled_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) )
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# The hub repo was updated on 2023-04-04, which changed the expected outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCamelCase__ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCamelCase__ = model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , max_new_tokens=20 )
UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
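# Illustrative sketch (not part of the test file above): the RoPE scaling knob the
# parameterized test exercises is set on the model config before the model is built.
# The checkpoint and factor below are examples only.
#
# config = GPTNeoXConfig.from_pretrained("EleutherAI/pythia-410m-deduped")
# config.rope_scaling = {"type": "dynamic", "factor": 2.0}
# model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped", config=config)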
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running the Markov chain algorithm."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
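# Usage sketch (added for illustration; not part of the original module). A
# two-state chain: the probabilities out of each node sum to 1, and the visit
# counts after many transitions approximate the stationary distribution.
def _demo_markov_chain() -> None:
    transitions = [
        ("a", "a", 0.3),
        ("a", "b", 0.7),
        ("b", "a", 0.4),
        ("b", "b", 0.6),
    ]
    visited = get_transitions("a", transitions, 5_000)
    print(visited.most_common())  # roughly a 4:7 ratio of "a" to "b" visits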
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def lowercase_ ( _lowerCamelCase : Any):
if hor == 128:
lowercase__ : List[str] = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 128, 256)
lowercase__ : Dict = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
lowercase__ : Dict = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : int = (32, 64, 128, 256)
lowercase__ : Any = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
lowercase__ : Optional[int] = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
lowercase__ : Dict = model.state_dict()
lowercase__ : Any = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
lowercase__ : Optional[int] = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : Optional[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Optional[int] = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''')
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( ):
lowercase__ : Optional[Any] = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
lowercase__ : Optional[int] = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
lowercase__ : List[Any] = model
lowercase__ : Union[str, Any] = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : Union[str, Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : List[Any] = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
with open("hub/hopper-medium-v2/value_function/config.json" , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
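# Note (illustrative, not part of the conversion script): the renaming above relies
# on both checkpoints enumerating parameters in the same order, so zipping the two
# key lists yields the rename map. The same trick in isolation, with made-up dicts:
#
# src = {"blocks.0.weight": 1, "blocks.0.bias": 2}
# dst_keys = ["down_blocks.0.weight", "down_blocks.0.bias"]
# mapping = dict(zip(src.keys(), dst_keys))
# renamed = {mapping[k]: v for k, v in src.items()}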
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
__A : Optional[Any] = ["image_processor", "tokenizer"]
__A : Tuple = "LayoutLMv3ImageProcessor"
__A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
lowercase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
lowercase__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase__ : Any = features["words"]
lowercase__ : Tuple = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
lowercase__ : Optional[int] = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
lowercase__ : str = images
return encoded_inputs
def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowercase__ : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F''' {len(lowercase_ )} and {len(lowercase_ )}''' )
return images_with_overflow
def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def __UpperCamelCase ( self : Any ) -> Any:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
return self.image_processor
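# Illustrative usage of a LayoutLMv3 processor like the one above (the checkpoint
# name is an example): OCR runs inside the image processor unless it was created
# with apply_ocr=False, in which case words and boxes must be passed explicitly.
#
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(image, return_tensors="pt")  # OCR-derived words + boxes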
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
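# Worked example (illustrative): the proper divisors of 220 sum to
# 1+2+4+5+10+11+20+22+44+55+110 = 284 and those of 284 sum to 1+2+4+71+142 = 220,
# so sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, and both numbers
# of this amicable pair are counted by solution().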
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    """Tracks peak resident CPU memory on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem, in MiB
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem, in MiB
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
A_ :int = logging.get_logger(__name__)
class __A ( a ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =feature_size
__UpperCamelCase : Tuple =sampling_rate
__UpperCamelCase : List[Any] =padding_value
__UpperCamelCase : List[Any] =kwargs.pop('padding_side' , 'right' )
__UpperCamelCase : Optional[Any] =kwargs.pop('return_attention_mask' , lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
"""simple docstring"""
if isinstance(lowerCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__UpperCamelCase : List[Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
__UpperCamelCase : Any =processed_features[self.model_input_names[0]]
__UpperCamelCase : Optional[Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
__UpperCamelCase : str =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__UpperCamelCase : Dict =required_input[0]
if isinstance(lowerCamelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
__UpperCamelCase : int =0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
__UpperCamelCase : Any =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
__UpperCamelCase : str ='tf'
elif is_torch_tensor(lowerCamelCase__ ):
__UpperCamelCase : Union[str, Any] ='pt'
elif isinstance(lowerCamelCase__ , (int, float, list, tuple, np.ndarray) ):
__UpperCamelCase : List[str] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(lowerCamelCase__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__UpperCamelCase : Dict =to_numpy(lowerCamelCase__ )
else:
__UpperCamelCase : List[Any] =[to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
__UpperCamelCase : Dict =self._get_padding_strategies(padding=lowerCamelCase__ , max_length=lowerCamelCase__ )
__UpperCamelCase : int =processed_features[self.model_input_names[0]]
__UpperCamelCase : int =len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
__UpperCamelCase : Union[str, Any] =[]
for i in range(lowerCamelCase__ ):
__UpperCamelCase : Tuple ={k: v[i] for k, v in processed_features.items()}
# truncation
__UpperCamelCase : Tuple =self._truncate(
lowerCamelCase__ , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , truncation=lowerCamelCase__ , )
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__UpperCamelCase : Tuple =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__UpperCamelCase : List[Any] =PaddingStrategy.MAX_LENGTH
__UpperCamelCase : Any ={}
for i in range(lowerCamelCase__ ):
# padding
__UpperCamelCase : Union[str, Any] =self._pad(
truncated_inputs[i] , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
__UpperCamelCase : str =[]
if value.dtype is np.dtype(np.floataa ):
__UpperCamelCase : Tuple =value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
"""simple docstring"""
__UpperCamelCase : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__UpperCamelCase : List[str] =len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__UpperCamelCase : List[Any] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__UpperCamelCase : List[str] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__UpperCamelCase : Tuple =np.ones(len(lowerCamelCase__ ) , dtype=np.intaa )
if needs_to_be_padded:
__UpperCamelCase : Any =max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
__UpperCamelCase : Optional[Any] =np.pad(
processed_features['attention_mask'] , (0, difference) )
__UpperCamelCase : List[str] =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__UpperCamelCase : Union[str, Any] =np.pad(
lowerCamelCase__ , lowerCamelCase__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__UpperCamelCase : Any =np.pad(
processed_features['attention_mask'] , (difference, 0) )
__UpperCamelCase : int =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__UpperCamelCase : str =np.pad(
lowerCamelCase__ , lowerCamelCase__ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
__UpperCamelCase : str =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__UpperCamelCase : Optional[int] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__UpperCamelCase : Union[str, Any] =len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
__UpperCamelCase : Dict =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__UpperCamelCase : Any =processed_features['attention_mask'][:max_length]
return processed_features
def __lowercase ( self , lowerCamelCase__=False , lowerCamelCase__=None ):
"""simple docstring"""
if padding is not False:
if padding is True:
__UpperCamelCase : Optional[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCamelCase : Optional[int] =PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCamelCase : List[str] =padding
else:
__UpperCamelCase : List[Any] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
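# Minimal sketch (added for illustration) of the right-padding arithmetic in `_pad`
# above for a 1-D feature: round the target length up to a multiple of 8, pad the
# values with the padding value, and extend the attention mask with zeros.
def _right_pad_demo() -> None:
    import numpy as np

    seq = np.array([0.1, 0.2, 0.3], dtype=np.float32)
    pad_to_multiple_of, padding_value = 8, 0.0
    max_length = ((len(seq) // pad_to_multiple_of) + 1) * pad_to_multiple_of  # -> 8
    difference = max_length - len(seq)
    attention_mask = np.pad(np.ones(len(seq), dtype=np.int32), (0, difference))
    padded = np.pad(seq, (0, difference), "constant", constant_values=padding_value)
    print(padded)          # [0.1 0.2 0.3 0.  0.  0.  0.  0. ]
    print(attention_mask)  # [1 1 1 0 0 0 0 0]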
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal) -> str:
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
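# Worked example (illustrative): repeated divmod by 16 peels hex digits off from
# least to most significant, e.g.
#   decimal_to_hexadecimal(5966) == "0x174e"   # 1*16**3 + 7*16**2 + 4*16 + 14
#   decimal_to_hexadecimal(-255) == "-0xff"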
"""Utilities for manipulating dataset and split file names."""
import itertools
import os
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
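# Usage sketch (added for illustration):
#
# camelcase_to_snakecase("SquadV2")            -> "squad_v2"
# snakecase_to_camelcase("squad_v2")           -> "SquadV2"
# filename_prefix_for_split("squad", "train")  -> "squad-train"
# filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[10, 10])
#   -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]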
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    """Minimal unconditional-generation pipeline used as a local custom-pipeline test fixture."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        # The extra string return value is intentional: it lets tests verify that
        # this local pipeline, rather than a hub pipeline, was loaded.
        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( a_ ):
UpperCAmelCase_ : List[Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : str ="AutoImageProcessor"
UpperCAmelCase_ : Any ="AutoTokenizer"
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCamelCase , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowercase = self.image_processor
lowercase = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop('images' , _lowerCamelCase )
lowercase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowercase = args[0]
lowercase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCamelCase_ ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.image_processor
lowercase = False
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=None ):
if added_vocab is None:
lowercase = self.tokenizer.get_added_vocab()
lowercase = {}
while tokens:
lowercase = re.search(R'<s_(.*?)>' , _lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowercase = start_token.group(1 )
lowercase = re.search(RF'</s_{key}>' , _lowerCamelCase , re.IGNORECASE )
lowercase = start_token.group()
if end_token is None:
lowercase = tokens.replace(_lowerCamelCase , '' )
else:
lowercase = end_token.group()
lowercase = re.escape(_lowerCamelCase )
lowercase = re.escape(_lowerCamelCase )
lowercase = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , _lowerCamelCase , re.IGNORECASE )
if content is not None:
lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase = self.tokenajson(_lowerCamelCase , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if value:
if len(_lowerCamelCase ) == 1:
lowercase = value[0]
lowercase = value
else: # leaf nodes
lowercase = []
for leaf in content.split(R'<sep/>' ):
lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowerCamelCase )
if len(output[key] ) == 1:
lowercase = output[key][0]
lowercase = tokens[tokens.find(_lowerCamelCase ) + len(_lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if len(_lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
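# Illustrative example of the XML-like token format the `tokenajson` (token2json)
# method above parses, assuming the tags are in the tokenizer's added vocabulary:
# "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>" becomes
# {"menu": {"name": "latte", "price": "4.50"}}, and "<sep/>" splits repeated
# leaf values into a list.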
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
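# Worked check (illustrative): the Champernowne digits at positions 1, 10, 100,
# 1000, 10000, 100000 and 1000000 are 1, 1, 5, 3, 7, 2 and 1, so the product
# returned by solution() is 210.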
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Dict=32 * 8 , _lowerCAmelCase : List[str]=32 * 8 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Optional[Any]=64 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_size
SCREAMING_SNAKE_CASE_ = max_size
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = hidden_dim
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE_ = self.num_queries
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE_ = self.num_channels
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
return config
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_layers )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = MaskaFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(_lowerCAmelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Any ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Any ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE_ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase_ ( self : List[str] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ : Tuple = 1E-4
def UpperCAmelCase_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[int] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase_ ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
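# Illustrative follow-up (not part of the tests): raw outputs are usually turned
# into segmentation maps with the image processor's post-processing helpers, e.g.
#
# processor = MaskaFormerImageProcessor.from_pretrained(
#     "facebook/mask2former-swin-small-coco-instance"
# )
# results = processor.post_process_instance_segmentation(
#     outputs, target_sizes=[image.size[::-1]]
# )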
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    # Cross-attention down block: [resnet -> transformer] x num_layers, plus an optional downsampler.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    # Plain down block: resnets only, plus an optional downsampler.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    # Cross-attention up block: concatenates the matching skip connection before each resnet.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    # Plain up block: resnets with skip-connection concatenation, plus an optional upsampler.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    # Mid block: one leading resnet, then [transformer -> resnet] x num_layers.
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
return hidden_states
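
# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how one of the blocks above could be initialized and applied. The shapes,
# channel counts, and NHWC layout below are assumptions for illustration only.
#
#   import jax
#
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   sample = jnp.zeros((1, 64, 64, 32))  # (batch, height, width, channels)
#   temb = jnp.zeros((1, 128))           # time-step embedding
#   params = block.init(jax.random.PRNGKey(0), sample, temb)
#   hidden_states, skip_connections = block.apply(params, sample, temb)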
| 279 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
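
    # Hypothetical invocation (the conversion script's filename is an assumption):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./unclip-image-variation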
| 279 | 1 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None):
    """Return the maximum sum of any contiguous subsequence of `nums` (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # either extend the current run, restart at `num`, or keep the best seen so far
        ans = max(num, ans + num, ans)

    return ans
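
# Worked example: for nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best contiguous
# run is [4, -1, 2, 1], so max_subsequence_sum(nums) returns 6.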
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array)) | 306 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 306 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# Note: these two helpers deliberately shadow the built-in `map`/`filter` so the
# benchmark body below reads naturally.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
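# Note: `get_duration` (imported from the local benchmark utils) is assumed to
# time the wrapped call and return the elapsed seconds, which is why each
# `map(...)` / `filter(...)` call above evaluates to a number stored in `times`.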
| 251 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
def __UpperCAmelCase(*args, **kwargs):
    # Placeholder stub (name kept from the source); the file repeats this pattern
    # once per torch-backed function.
    requires_backends(__UpperCAmelCase, ["torch"])
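
# Illustration (assumed behavior of `requires_backends`): with torch absent,
# touching any stub in this module raises an import error asking the user to
# install the missing backend, e.g.
#   __lowerCamelCase()   # -> ImportError: ... requires the PyTorch library ...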
class __lowerCamelCase(metaclass=DummyObject):
    # Placeholder stub (name kept from the source); the file repeats this pattern
    # once per torch-backed class.
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"]) | 317 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8)) | 317 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
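
    # Usage note: because the module object in sys.modules is swapped for a
    # _LazyModule, an import such as `from transformers.models.mra import MraModel`
    # (hypothetical example path) only triggers the torch-heavy submodule import
    # on first attribute access.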
| 9 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.373_7124, -1.772_4937, -1.936_4233],
            [-1.597_7281, -1.986_7939, -2.152_3695],
            [-1.579_5398, -1.926_9832, -2.09_3942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)
self.assertTrue(outputs.loss is not None )
| 151 | 0 |
"""simple docstring"""
# using DFS to find the Eulerian path traversal
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=None ) -> List[Any]:
_snake_case = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_snake_case , _snake_case = True, True
_snake_case = dfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return path
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : Dict ) -> List[str]:
_snake_case = 0
_snake_case = -1
for i in range(__lowerCamelCase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_snake_case = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] ) -> int:
_snake_case = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_snake_case , _snake_case = check_circuit_or_path(__lowerCamelCase , __lowerCamelCase )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
_snake_case = 1
if check == 2:
_snake_case = odd_node
print('''graph has an Euler path''' )
if check == 1:
print('''graph has an Euler cycle''' )
_snake_case = dfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
def _UpperCAmelCase ( ) -> Dict:
_snake_case = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_snake_case = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_snake_case = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_snake_case = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_snake_case = {
1: [],
2: []
# all degree is zero
}
_snake_case = 10
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
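# Hedged sketch (not part of the original module): the odd-degree rule that
# check_circuit_or_path applies above, written out plainly. It assumes the
# graph is connected and undirected; euler_kind is an illustrative name.
#
#     def euler_kind(graph):
#         odd = sum(1 for nbrs in graph.values() if len(nbrs) % 2 == 1)
#         if odd == 0:
#             return "circuit"   # every vertex has even degree
#         if odd == 2:
#             return "path"      # exactly two odd-degree endpoints
#         return "neither"
#
#     euler_kind({1: [2, 3], 2: [1, 3], 3: [1, 2]})  # triangle -> "circuit"
#     euler_kind({1: [2], 2: [1, 3], 3: [2]})        # chain    -> "path"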
| 40 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase__ = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase__ = model.state_dict()
UpperCAmelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
UpperCAmelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
UpperCAmelCase__ = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"]
UpperCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
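# Illustrative sketch (hypothetical helper, not used by this script): the
# teacher-to-student key remapping the loop above performs implicitly when it
# copies teacher layers [0, 2, 4, 7, 9, 11] into student layers 0..5.
#
#     def remap_layer_key(key, teacher_idx, student_idx, prefix="bert"):
#         return key.replace(
#             f"{prefix}.encoder.layer.{teacher_idx}.",
#             f"{prefix}.encoder.layer.{student_idx}.",
#             1,
#         )
#
#     remap_layer_key("bert.encoder.layer.7.attention.self.query.weight", 7, 3)
#     # -> "bert.encoder.layer.3.attention.self.query.weight"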
| 40 | 1 |
A : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : List[str] = [{"type": "code", "content": INSTALL_CONTENT}]
A : Tuple = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 184 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A : Union[str, Any] = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class _lowercase ( unittest.TestCase):
"""simple docstring"""
A__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
lowerCamelCase__ : Dict = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
lowerCamelCase__ : List[str] = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}] )
lowerCamelCase__ : Optional[int] = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
] , )
lowerCamelCase__ : Any = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
# Legacy behavior
lowerCamelCase__ : Dict = text_classifier("This is great !" , return_all_scores=__lowerCamelCase )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
lowerCamelCase__ : str = text_classifier("This is great !" , return_all_scores=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}]] )
lowerCamelCase__ : Optional[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
] , )
lowerCamelCase__ : Any = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"label": "LABEL_0", "score": 0.5_0_4},
{"label": "LABEL_0", "score": 0.5_0_4},
] , )
@require_torch
def lowerCAmelCase ( self : str ):
'''simple docstring'''
import torch
lowerCamelCase__ : int = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
lowerCamelCase__ : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
@require_tf
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : List[str] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
lowerCamelCase__ : List[str] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_0_4}] )
@slow
@require_torch
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = pipeline("text-classification" )
lowerCamelCase__ : List[str] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
lowerCamelCase__ : Optional[int] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
lowerCamelCase__ : Tuple = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.9_8_8}] )
@slow
@require_tf
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : str = pipeline("text-classification" , framework="tf" )
lowerCamelCase__ : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
lowerCamelCase__ : Optional[Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
lowerCamelCase__ : Dict = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.9_8_8}] )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TextClassificationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : int = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCamelCase__ : List[Any] = "HuggingFace is in"
lowerCamelCase__ : Tuple = text_classifier(__lowerCamelCase )
self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
lowerCamelCase__ : Optional[int] = ["HuggingFace is in ", "Paris is in France"]
lowerCamelCase__ : Dict = text_classifier(__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}, {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Force all results to be returned with `top_k=None`
# This is NOT the legacy format
lowerCamelCase__ : List[Any] = text_classifier(__lowerCamelCase , top_k=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [[{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N, [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N] , )
lowerCamelCase__ : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
lowerCamelCase__ : List[Any] = text_classifier(__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used as a text pair, but the tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was producing wrong outputs.
lowerCamelCase__ : Any = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(__lowerCamelCase ):
text_classifier(__lowerCamelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCamelCase__ : int = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
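# Minimal usage sketch of the behaviour these tests exercise (the scores are
# placeholders, not guaranteed values):
#
#     from transformers import pipeline
#
#     clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     clf("This is great !")            # [{"label": "LABEL_0", "score": ...}]
#     clf("This is great !", top_k=2)   # one dict per label, best score first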
| 184 | 1 |
def A_ ( A__ ) -> list[list]:
a__ : List[Any] = current_set.copy()
for row_index, row in enumerate(A__ ):
a__ : int = row[0]
for column_index, column in enumerate(A__ ):
if magnitude == 0:
a__ : Tuple = column
continue
a__ : int = column / magnitude
# Subtract to cancel term
a__ : Optional[Any] = current_set[0]
a__ : Union[str, Any] = [first_row]
a__ : List[str] = current_set[1::]
for row in current_set:
a__ : Dict = []
# If the first term is 0, the row is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(A__ )
continue
for column_index in range(len(A__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
a__ : str = final_set[0]
a__ : List[str] = []
a__ : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
a__ : Optional[int] = simplify(A__ )
for i in range(len(A__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , A__ )
a__ : Optional[int] = resultant
return final_set
def A_ ( A__ ) -> list:
if len(A__ ) == 0:
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
a__ : str = len(A__ ) + 1
if any(len(A__ ) != _length for item in equations ):
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
for row in equations:
if any(not isinstance(A__ , (int, float) ) for column in row ):
raise ValueError('solve_simultaneous() requires lists of integers' )
if len(A__ ) == 1:
return [equations[0][-1] / equations[0][0]]
a__ : Tuple = equations.copy()
if any(0 in row for row in data_set ):
a__ : Dict = data_set.copy()
a__ : Dict = []
for row_index, row in enumerate(A__ ):
if 0 not in row:
a__ : str = data_set.pop(A__ )
break
if not full_row:
raise ValueError('solve_simultaneous() requires at least 1 full equation' )
data_set.insert(0 , A__ )
a__ : List[str] = data_set.copy()
a__ : Any = simplify(A__ )
a__ : Optional[Any] = simplified[::-1]
a__ : list = []
for row in simplified:
a__ : int = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
a__ : Tuple = row.copy()[: len(A__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A__ ) == 0:
solutions.append(0 )
continue
a__ : str = temp_row[1::]
a__ : Any = temp_row[::-1]
for column_index, column in enumerate(A__ ):
current_solution -= column * solutions[column_index]
solutions.append(A__ )
a__ : Optional[Any] = []
for item in solutions:
final.append(float(round(A__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : List[Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
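# Worked 2x2 check (hand-verified: x + y = 3 and 2x - y = 0 give x = 1, y = 2)
print(solve_simultaneous([[1, 1, 3], [2, -1, 0]])) # expected [1.0, 2.0]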
| 225 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase : List[str] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""CLIPFeatureExtractor"""]
lowercase : Union[str, Any] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
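# Hedged sketch (much simplified relative to transformers' _LazyModule): the
# lazy-import idea above defers every submodule import until an attribute is
# first accessed. _MiniLazyModule is an illustrative name.
#
#     import importlib
#     import types
#
#     class _MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             if attr not in self._attr_to_module:
#                 raise AttributeError(attr)
#             module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(module, attr)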
| 225 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def _A ( A__ , A__ = 16 , A__ = "bert-base-cased" ):
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(A__ )
__lowercase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase = datasets.map(
A__ , batched=A__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels', which is the column name the models of the
# transformers library expect for labels
__lowercase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(A__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowercase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
model.eval()
__lowercase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase = model(**A__ )
__lowercase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
__lowercase , __lowercase = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowercase = metric.compute()
return eval_metric["accuracy"]
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config['''lr''']
__lowercase = int(config['''num_epochs'''] )
__lowercase = int(config['''seed'''] )
__lowercase = int(config['''batch_size'''] )
__lowercase = args.model_name_or_path
set_seed(A__ )
__lowercase , __lowercase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also controls new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__lowercase = 1
__lowercase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowercase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowercase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowercase = 0
__lowercase = evaluate.load('''glue''' , '''mrpc''' )
__lowercase = num_epochs
if args.partial_train_epoch is not None:
__lowercase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowercase = args.resume_from_checkpoint.split('''epoch_''' )[1]
__lowercase = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowercase = int(A__ ) + 1
__lowercase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print('''resumed checkpoint performance:''' , A__ )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F"state_{starting_epoch-1}.json" ) , '''r''' ) as f:
__lowercase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowercase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowercase = model(**A__ )
__lowercase = outputs.loss
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowercase = F"epoch_{epoch}"
__lowercase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowercase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowercase = accuracy
__lowercase = lr_scheduler.get_lr()[0]
__lowercase = optimizer.param_groups[0]['''lr''']
__lowercase = epoch
__lowercase = overall_step
accelerator.print(F"epoch {epoch}:" , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"state_{epoch}.json" ) , '''w''' ) as f:
json.dump(A__ , A__ )
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing.''' )
parser.add_argument(
'''--model_name_or_path''' , type=A__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=A__ , )
parser.add_argument(
'''--output_dir''' , type=A__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=A__ , default=A__ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=A__ , default=A__ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=A__ , default=2 , help='''Number of train epochs.''' , )
__lowercase = parser.parse_args()
__lowercase = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
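# The checkpoint round trip this script relies on, in two lines (the directory
# name is illustrative):
#
#     accelerator.save_state("output/epoch_3")   # model, optimizer, scheduler, RNG states
#     accelerator.load_state("output/epoch_3")   # restore all of them and keep training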
| 104 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
if isinstance(A__ , A__ ):
__lowercase = np.full((len(A__ ), sequence_length, 2) , A__ )
else:
__lowercase = np.full((len(A__ ), sequence_length) , A__ )
for i, tensor in enumerate(A__ ):
if padding_side == "right":
if isinstance(A__ , A__ ):
__lowercase = tensor[:sequence_length]
else:
__lowercase = tensor[:sequence_length]
else:
if isinstance(A__ , A__ ):
__lowercase = tensor[:sequence_length]
else:
__lowercase = tensor[:sequence_length]
return out_tensor.tolist()
def _A ( A__ ):
"""simple docstring"""
__lowercase = ord(A__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
__lowercase = unicodedata.category(A__ )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = -1_0_0
SCREAMING_SNAKE_CASE : str = "pt"
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[str] ):
import torch
__lowercase = '''label''' if '''label''' in features[0].keys() else '''labels'''
__lowercase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__lowercase = self.tokenizer.pad(
lowercase__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='''pt''' if labels is None else None ,)
if labels is None:
return batch
__lowercase = torch.tensor(batch['''entity_ids'''] ).shape[1]
__lowercase = self.tokenizer.padding_side
if padding_side == "right":
__lowercase = [
list(lowercase__ ) + [self.label_pad_token_id] * (sequence_length - len(lowercase__ )) for label in labels
]
else:
__lowercase = [
[self.label_pad_token_id] * (sequence_length - len(lowercase__ )) + list(lowercase__ ) for label in labels
]
__lowercase = [feature['''ner_tags'''] for feature in features]
__lowercase = padding_tensor(lowercase__ ,-1 ,lowercase__ ,lowercase__ )
__lowercase = [feature['''original_entity_spans'''] for feature in features]
__lowercase = padding_tensor(lowercase__ ,(-1, -1) ,lowercase__ ,lowercase__ )
__lowercase = {k: torch.tensor(lowercase__ ,dtype=torch.intaa ) for k, v in batch.items()}
return batch
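# Illustrative call to padding_tensor above (behaviour inferred from its
# definition; the values are made up): right-pad two label rows to length 4
# with -1.
#
#     padding_tensor([[1, 2], [3]], -1, "right", 4)
#     # -> [[1, 2, -1, -1], [3, -1, -1, -1]]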
| 104 | 1 |
import warnings
from .generation import TFGenerationMixin
class snake_case_ (lowerCamelCase_ ):
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , lowerCamelCase_ , )
| 109 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case : Tuple = logging.get_logger(__name__)
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : str = UNetaDModel
UpperCAmelCase__ : str = '''sample'''
@property
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor([10] ).to(__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Tuple ) -> Tuple:
return (3, 32, 32)
@property
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
return (3, 32, 32)
def lowerCamelCase__( self :str ) -> Tuple:
a__ = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
a__ = self.dummy_input
return init_dict, inputs_dict
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = UNetaDModel
UpperCAmelCase__ : Any = '''sample'''
@property
def lowerCamelCase__( self :Dict ) -> List[str]:
a__ = 4
a__ = 4
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor([10] ).to(__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Any ) -> str:
return (4, 32, 32)
@property
def lowerCamelCase__( self :Any ) -> Dict:
return (4, 32, 32)
def lowerCamelCase__( self :int ) -> int:
a__ = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
a__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__( self :str ) -> Any:
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' ,'This test is supposed to run on GPU' )
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
model.to(__snake_case )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' ,'This test is supposed to run on GPU' )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
# by default, model loading will use accelerate as `low_cpu_mem_usage=True`
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
model_accelerate.to(__snake_case )
model_accelerate.eval()
a__ = torch.randn(
1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,)
a__ = noise.to(__snake_case )
a__ = torch.tensor([10] * noise.shape[0] ).to(__snake_case )
a__ = model_accelerate(__snake_case ,__snake_case )['sample']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
a__ , a__ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case ,low_cpu_mem_usage=__snake_case )
model_normal_load.to(__snake_case )
model_normal_load.eval()
a__ = model_normal_load(__snake_case ,__snake_case )['sample']
assert torch_all_close(__snake_case ,__snake_case ,rtol=1E-3 )
def lowerCamelCase__( self :str ) -> Union[str, Any]:
a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__snake_case )
a__ = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
a__ = noise.to(__snake_case )
a__ = torch.tensor([10] * noise.shape[0] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-3 ) )
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Dict = UNetaDModel
UpperCAmelCase__ : Optional[Any] = '''sample'''
@property
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any]=(32, 32) ) -> Optional[int]:
a__ = 4
a__ = 3
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
return (3, 32, 32)
@property
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
return (3, 32, 32)
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
a__ = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__( self :str ) -> Tuple:
a__ , a__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = self.dummy_input
a__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(__snake_case )
a__ = noise
a__ = model(**__snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
a__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__snake_case )
a__ = 4
a__ = 3
a__ = (2_56, 2_56)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [1E-4] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-2 ) )
def lowerCamelCase__( self :Dict ) -> int:
a__ = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__snake_case )
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [1E-4] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-2 ) )
def lowerCamelCase__( self :int ) -> str:
# not required for this model
pass
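# Minimal forward-pass sketch matching the dummy config used in these tests
# (shapes only; the output values are untested here):
#
#     import torch
#     from diffusers import UNet2DModel
#
#     model = UNet2DModel(
#         sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#         attention_head_dim=3, block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     noise = torch.randn(4, 3, 32, 32)
#     out = model(noise, timestep=10).sample   # shape preserved: (4, 3, 32, 32)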
| 109 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowercase ( a__ : Dict=None ) -> int:
_UpperCamelCase = argparse.ArgumentParser(add_help=a__ , allow_abbrev=a__ )
# The main config parser
_UpperCamelCase = config_command_parser(a__ )
# The subparser to add commands to
_UpperCamelCase = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(a__ , parents=[parent_parser] )
update_command_parser(a__ , parents=[parent_parser] )
return config_parser
def lowercase ( ) -> str:
_UpperCamelCase = get_config_parser()
_UpperCamelCase = config_parser.parse_args()
if not hasattr(a__ , '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(a__ )
if __name__ == "__main__":
main()
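# Typical invocations of the subcommands wired up above:
#
#     accelerate config            # interactive questionnaire
#     accelerate config default    # write a default config file non-interactively
#     accelerate config update     # bring an existing config file up to date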
| 256 | """simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _lowercase):
snake_case__ = ['''input_values''', '''padding_mask''']
def __init__( self : Optional[Any] , __UpperCamelCase : int = 1 , __UpperCamelCase : int = 2_4000 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = None , __UpperCamelCase : float = None , **__UpperCamelCase : Optional[Any] , ) -> Optional[int]:
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
_UpperCamelCase = chunk_length_s
_UpperCamelCase = overlap
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Union[str, Any] , __UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None , __UpperCamelCase : Optional[bool] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
_UpperCamelCase = True
_UpperCamelCase = bool(
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_UpperCamelCase = [np.asarray(__UpperCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
_UpperCamelCase = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_UpperCamelCase = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCamelCase = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
_UpperCamelCase = None
_UpperCamelCase = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_UpperCamelCase = min(array.shape[0] for array in raw_audio )
_UpperCamelCase = int(np.floor(max_length / self.chunk_stride ) )
_UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_UpperCamelCase = max(array.shape[0] for array in raw_audio )
_UpperCamelCase = int(np.ceil(max_length / self.chunk_stride ) )
_UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_UpperCamelCase = '''max_length'''
else:
_UpperCamelCase = input_values
# normal padding on batch
if padded_inputs is None:
_UpperCamelCase = self.pad(
__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , padding=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
if padding:
_UpperCamelCase = padded_inputs.pop('''attention_mask''' )
_UpperCamelCase = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
_UpperCamelCase = example[..., None]
input_values.append(example.T )
_UpperCamelCase = input_values
if return_tensors is not None:
_UpperCamelCase = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
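# Worked example of the chunking arithmetic above, for chunk_length_s=1.0 and
# overlap=0.5 at sampling_rate=24000 (illustrative numbers):
#
#     chunk_length = int(1.0 * 24000)                  # 24000 samples
#     chunk_stride = max(1, int((1.0 - 0.5) * 24000))  # 12000 samples
#     # with padding, max_length becomes (nb_step - 1) * 12000 + 24000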
| 256 | 1 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
A: Union[str, Any] = datasets.logging.get_logger(__name__)
A: List[str] = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
A: int = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
A: Any = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
if self.config_name == "default":
UpperCAmelCase : Dict = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
UpperCAmelCase : Dict = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> str:
'''simple docstring'''
if gpus is None:
UpperCAmelCase : Optional[int] = 1 if torch.cuda.is_available() else 0
UpperCAmelCase : Tuple = {"""src""": sources, """mt""": predictions, """ref""": references}
UpperCAmelCase : Tuple = [dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for t in zip(*data.values() )]
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.scorer.predict(_SCREAMING_SNAKE_CASE , gpus=_SCREAMING_SNAKE_CASE , progress_bar=_SCREAMING_SNAKE_CASE )
return {"mean_score": mean_score, "scores": scores}
| 76 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))
    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))
    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))
    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))
    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
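# Illustration (added; not part of the original suite): a minimal standalone
# check of the flatten_dict behavior asserted above, using the default "."
# delimiter. Guarded so it only runs when the file is executed directly.
if __name__ == "__main__":
    nested = {"a": {"b": 1, "c": {"d": 2}}}
    assert flatten_dict(nested) == {"a.b": 1, "a.c.d": 2}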
| 76 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
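# Background (added for clarity): AlphaFold/OpenFold use two atom layouts for a
# protein. "atom37" is a fixed layout with one slot for each of the 37 possible
# heavy-atom names, independent of residue type; "atom14" is a compact layout
# with at most 14 slots per residue, whose meaning depends on the residue type.
# The function below builds the per-residue index tables and existence masks
# needed to gather tensors between the two layouts.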
def make_atom14_masks(protein: Dict) -> Dict:
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device)
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
return protein
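# Illustration (added; not from the original module): the function expects a
# feature dict with at least an "aatype" tensor of residue-type indices and
# fills in the gather indices and masks in place. For example:
#   protein = {"aatype": torch.zeros(8, dtype=torch.long)}
#   protein = make_atom14_masks(protein)
#   assert protein["residx_atom14_to_atom37"].shape == (8, 14)
#   assert protein["atom37_atom_exists"].shape == (8, 37)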
def make_atom14_masks_np(batch: Dict) -> Dict:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 189 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False)
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy')
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f'/dit/{word}_512.npy')
            assert np.abs((expected_image - image).max()) < 1e-1
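# Illustration (added; not part of the original tests): the integration tests
# above reduce to this minimal inference pattern. Guarded so it only runs when
# the file is executed directly; it needs a CUDA device and downloads weights.
if __name__ == "__main__":
    demo_pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    demo_pipe.to("cuda")
    demo_ids = demo_pipe.get_label_ids(["white shark"])
    demo_images = demo_pipe(demo_ids, generator=torch.manual_seed(0), num_inference_steps=25, output_type="np").images
    print(demo_images.shape)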
| 123 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
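# Note (added for clarity): this file follows the standard transformers
# lazy-import pattern. `_import_structure` maps submodule names to their public
# symbols; under TYPE_CHECKING the real imports run for static analysis, while
# at runtime `_LazyModule` replaces this module in sys.modules and defers the
# heavy torch-dependent imports until an attribute is first accessed.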
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) ->PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir) ->None:
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
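# Example invocation (added for illustration; the script filename and checkpoint
# path are assumed, not taken from this file):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc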
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303 | 0 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` unit cells can be tiled with unit squares
    and tiles of lengths two, three and four (cf. Project Euler problem 117).
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 251 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    Bezier curve over a list of 2D control points, evaluated via the
    Bernstein basis.
    """
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
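# Note (added for clarity): a Bezier curve always interpolates its endpoints;
# bezier_curve_function(0) returns the first control point and
# bezier_curve_function(1) the last, because exactly one Bernstein basis term
# is nonzero at t = 0 and at t = 1.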
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 251 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
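# Note (added for clarity): no gates are applied before the measurement, so
# qubit 0 stays in |0> and an ideal simulator returns counts of {'0': 1000}
# for the 1000 shots above.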
if __name__ == "__main__":
    print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 351 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip('/')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/') != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 210 | 0 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
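# Note (added for clarity): the Custom* classes above come from a local
# `test_module` package put on sys.path; the tests below register them with the
# Auto* factories and exercise both local and Hub-hosted ("remote code")
# loading paths.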
UpperCAmelCase_ : List[str] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
UpperCAmelCase_ : Optional[Any] = get_tests_dir("""fixtures/vocab.json""")
UpperCAmelCase_ : List[Any] = get_tests_dir("""fixtures""")
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = 0
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''')
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str = WavaVecaConfig()
SCREAMING_SNAKE_CASE_ : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''')
# save in new folder
model_config.save_pretrained(lowercase_)
processor.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_))
copyfile(lowercase_ , os.path.join(lowercase_ , '''vocab.json'''))
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''')
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaProcessor(lowercase_ , lowercase_)
# save in new folder
processor.save_pretrained(lowercase_)
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_ , lowercase_) , '''r''') as f:
SCREAMING_SNAKE_CASE_ : List[Any] = json.load(lowercase_)
config_dict.pop('''processor_class''')
with open(os.path.join(lowercase_ , lowercase_) , '''w''') as f:
f.write(json.dumps(lowercase_))
SCREAMING_SNAKE_CASE_ : List[Any] = AutoProcessor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''')
SCREAMING_SNAKE_CASE_ : str = WavaVecaProcessor(lowercase_ , lowercase_)
# save in new folder
processor.save_pretrained(lowercase_)
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_ , lowercase_) , '''r''') as f:
SCREAMING_SNAKE_CASE_ : Dict = json.load(lowercase_)
config_dict.pop('''processor_class''')
with open(os.path.join(lowercase_ , lowercase_) , '''w''') as f:
f.write(json.dumps(lowercase_))
SCREAMING_SNAKE_CASE_ : Tuple = AutoProcessor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaConfig(processor_class='''Wav2Vec2Processor''')
model_config.save_pretrained(lowercase_)
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , '''vocab.json'''))
# create emtpy sample processor
with open(os.path.join(lowercase_ , lowercase_) , '''w''') as f:
f.write('''{}''')
SCREAMING_SNAKE_CASE_ : int = AutoProcessor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
with self.assertRaises(lowercase_):
SCREAMING_SNAKE_CASE_ : str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowercase_)
self.assertTrue(processor.special_attribute_present)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
SCREAMING_SNAKE_CASE_ : Tuple = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowercase_ , use_fast=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present)
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , lowercase_)
AutoFeatureExtractor.register(lowercase_ , lowercase_)
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_)
AutoProcessor.register(lowercase_ , lowercase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_):
AutoProcessor.register(lowercase_ , lowercase_)
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CustomFeatureExtractor.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowercase_ , '''vocab.txt''')
with open(lowercase_ , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
SCREAMING_SNAKE_CASE_ : List[str] = CustomTokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = CustomProcessor(lowercase_ , lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : str = AutoProcessor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = False
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = False
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "AutoFeatureExtractor"
__UpperCamelCase = "AutoTokenizer"
__UpperCamelCase = False
try:
AutoConfig.register('''custom''' , lowercase_)
AutoFeatureExtractor.register(lowercase_ , lowercase_)
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_)
AutoProcessor.register(lowercase_ , lowercase_)
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''')
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowercase_)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertFalse(processor.special_attribute_present)
self.assertFalse(processor.feature_extractor.special_attribute_present)
self.assertFalse(processor.tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowercase_)
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''')
self.assertTrue(processor.special_attribute_present)
self.assertTrue(processor.feature_extractor.special_attribute_present)
self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''')
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''')
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''')
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-processor''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''')
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaProcessor.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , '''test-processor''') , push_to_hub=lowercase_ , use_auth_token=self._token)
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor')
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaProcessor.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , '''test-processor-org''') , push_to_hub=lowercase_ , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE_ : str = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''')
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_))
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ : Optional[int] = CustomFeatureExtractor.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Any = os.path.join(lowercase_ , '''vocab.txt''')
with open(lowercase_ , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens]))
SCREAMING_SNAKE_CASE_ : List[str] = CustomTokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = CustomProcessor(lowercase_ , lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Repository(lowercase_ , clone_from=F'{USER}/test-dynamic-processor' , token=self._token)
processor.save_pretrained(lowercase_)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowercase_ , '''tokenizer_config.json''')) as f:
SCREAMING_SNAKE_CASE_ : Dict = json.load(lowercase_)
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , '''custom_feature_extraction.py''')))
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , '''custom_tokenization.py''')))
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , '''custom_processing.py''')))
repo.push_to_hub()
SCREAMING_SNAKE_CASE_ : Tuple = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=lowercase_)
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''')
| 91 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase__(__snake_case ) -> int: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase__() -> Any:
'''simple docstring'''
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase__ = [1, 2, 3]
with pytest.raises(__snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(__snake_case ,__snake_case ,num_proc=2 )
with pytest.raises(__snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(__snake_case ,__snake_case ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' ,[2, -1] )
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = [1, 2]
lowerCamelCase__ = {'''a''': 1, '''b''': 2}
lowerCamelCase__ = {'''a''': [1, 2], '''b''': [3, 4]}
lowerCamelCase__ = {'''a''': {'''1''': 1}, '''b''': 2}
lowerCamelCase__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowerCamelCase__ = [2, 3]
lowerCamelCase__ = {'''a''': 2, '''b''': 3}
lowerCamelCase__ = {'''a''': [2, 3], '''b''': [4, 5]}
lowerCamelCase__ = {'''a''': {'''1''': 2}, '''b''': 3}
lowerCamelCase__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
| 209 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ])
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ])
        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template")
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None)
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
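    # Note (added for clarity): zero-shot classification reuses an NLI model, so
    # the pipeline must know which logit index means "entailment". The checks
    # above show it is resolved from the label names (case-insensitive prefix
    # "entail") and falls back to -1 for generic LABEL_* names.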
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt")
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            })
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            })
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs), {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 350 |
def A ( __UpperCAmelCase = 100_0000 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , __UpperCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 344 | 0 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = SwinConfig()
lowerCamelCase__ : Any = swin_name.split("_" )
lowerCamelCase__ : Optional[int] = name_split[1]
lowerCamelCase__ : Optional[int] = int(name_split[4] )
lowerCamelCase__ : Any = int(name_split[3][-1] )
if model_size == "tiny":
lowerCamelCase__ : Tuple = 96
lowerCamelCase__ : Optional[int] = (2, 2, 6, 2)
lowerCamelCase__ : Optional[Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCamelCase__ : Optional[int] = 96
lowerCamelCase__ : Tuple = (2, 2, 18, 2)
lowerCamelCase__ : str = (3, 6, 12, 24)
elif model_size == "base":
lowerCamelCase__ : Optional[Any] = 128
lowerCamelCase__ : Optional[Any] = (2, 2, 18, 2)
lowerCamelCase__ : Tuple = (4, 8, 16, 32)
else:
lowerCamelCase__ : List[str] = 192
lowerCamelCase__ : Tuple = (2, 2, 18, 2)
lowerCamelCase__ : int = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCamelCase__ : Optional[Any] = 21841
else:
lowerCamelCase__ : Union[str, Any] = 1000
lowerCamelCase__ : Tuple = "huggingface/label-files"
lowerCamelCase__ : Tuple = "imagenet-1k-id2label.json"
    lowerCamelCase__ : Any = json.load(open(hf_hub_download("huggingface/label-files" , "imagenet-1k-id2label.json" , repo_type="dataset" ) , "r" ) )
    lowerCamelCase__ : List[Any] = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase__ : List[Any] = idalabel
lowerCamelCase__ : str = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : str = img_size
lowerCamelCase__ : Union[str, Any] = num_classes
lowerCamelCase__ : str = embed_dim
lowerCamelCase__ : str = depths
lowerCamelCase__ : Any = num_heads
lowerCamelCase__ : Union[str, Any] = window_size
return config
def lowercase_ ( _A : str ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowerCamelCase__ : Union[str, Any] = "encoder." + name
if "attn.proj" in name:
lowerCamelCase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__ : Tuple = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__ : Dict = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
lowerCamelCase__ : Union[str, Any] = "layernorm.weight"
if name == "norm.bias":
lowerCamelCase__ : str = "layernorm.bias"
if "head" in name:
lowerCamelCase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowerCamelCase__ : Optional[Any] = "swin." + name
return name
def lowercase_ ( _A : List[Any] , _A : List[str] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Tuple = orig_state_dict.pop(_A )
if "mask" in key:
continue
elif "qkv" in key:
lowerCamelCase__ : str = key.split("." )
lowerCamelCase__ : Optional[int] = int(key_split[1] )
lowerCamelCase__ : Optional[int] = int(key_split[3] )
lowerCamelCase__ : Any = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase__ : str = val[:dim, :]
lowerCamelCase__ : List[str] = val[
dim : dim * 2, :
]
lowerCamelCase__ : List[Any] = val[-dim:, :]
else:
lowerCamelCase__ : str = val[
:dim
]
lowerCamelCase__ : Tuple = val[
dim : dim * 2
]
lowerCamelCase__ : int = val[
-dim:
]
else:
lowerCamelCase__ : Tuple = val
return orig_state_dict
def lowercase_ ( _A : List[Any] , _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Any = timm.create_model(_A , pretrained=_A )
timm_model.eval()
lowerCamelCase__ : List[Any] = get_swin_config(_A )
lowerCamelCase__ : Dict = SwinForImageClassification(_A )
model.eval()
lowerCamelCase__ : List[Any] = convert_state_dict(timm_model.state_dict() , _A )
model.load_state_dict(_A )
lowerCamelCase__ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__ : int = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
lowerCamelCase__ : Any = Image.open(requests.get(_A , stream=_A ).raw )
lowerCamelCase__ : int = image_processor(images=_A , return_tensors="pt" )
lowerCamelCase__ : List[Any] = timm_model(inputs["pixel_values"] )
lowerCamelCase__ : List[str] = model(**_A ).logits
assert torch.allclose(_A , _A , atol=1E-3 )
print(F"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
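
For reference, the converter can also be driven programmatically. A minimal sketch, assuming the upstream name `convert_swin_checkpoint` used at the call site above; the dump folder path is illustrative:

# Downloads the timm weights, remaps them onto SwinForImageClassification,
# checks the logits agree within 1e-3, then saves model and image processor.
convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-patch4-window7-224")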
| 184 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "xlm-roberta"
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Any]=30522 , __lowerCamelCase : List[Any]=768 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Dict=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Dict="absolute" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple=None , **__lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Any = position_embedding_type
lowerCamelCase__ : Any = use_cache
lowerCamelCase__ : Any = classifier_dropout
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
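
A minimal sketch of consuming the ONNX axis mapping defined above, assuming the upstream transformers names XLMRobertaConfig and XLMRobertaOnnxConfig for the two classes (the snippet's own class names are placeholders):

from transformers import XLMRobertaConfig
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig

config = XLMRobertaConfig()
onnx_config = XLMRobertaOnnxConfig(config, task="default")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])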
| 184 | 1 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowercase__ = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=UpperCAmelCase_ , all_configs=UpperCAmelCase_ , save_infos=UpperCAmelCase_ )
UpperCAmelCase : Dict = TestCommand(*UpperCAmelCase_ )
test_command.run()
UpperCAmelCase : Optional[int] = os.path.join(UpperCAmelCase_ , 'README.md' )
assert os.path.exists(UpperCAmelCase_ )
UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(UpperCAmelCase_ )
UpperCAmelCase : int = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_35_15_63,
'num_examples': 1_00_00,
},
{
'name': 'validation',
'num_bytes': 23_84_18,
'num_examples': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['default'] , key ), getattr(expected_dataset_infos['default'] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
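
The is_apercent_close helper tolerates a 1% relative difference, which absorbs small serialization overheads in the recorded byte counts; for example:

assert is_apercent_close(1_000_100, 1_000_000)      # 0.01% off, accepted
assert not is_apercent_close(1_020_000, 1_000_000)  # 2% off, rejected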
| 280 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def UpperCamelCase( ):
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=UpperCAmelCase_ , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=UpperCAmelCase_ , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=UpperCAmelCase_ , help='where to store parsed gold_data_path file' , )
UpperCAmelCase : int = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
UpperCAmelCase : int = json.load(UpperCAmelCase_ )
for dpr_record in tqdm(UpperCAmelCase_ ):
UpperCAmelCase : Any = dpr_record['question']
UpperCAmelCase : List[str] = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(UpperCAmelCase_ ) + '\n' )
if __name__ == "__main__":
main()
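
For a single DPR record, the loop above emits one question line to the evaluation set and one tab-joined line of positive-context titles to the gold file. An illustrative record (contents invented for the example) shows the shape:

dpr_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "Declaration of Independence"}],
}
eval_line = dpr_record["question"] + "\n"
gold_line = "\t".join(context["title"] for context in dpr_record["positive_ctxs"]) + "\n"
assert gold_line == "Thomas Jefferson\tDeclaration of Independence\n"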
| 280 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Any = AudioLDMPipeline
_lowercase : Optional[int] = TEXT_TO_AUDIO_PARAMS
_lowercase : Tuple = TEXT_TO_AUDIO_BATCH_PARAMS
_lowercase : Any = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def UpperCAmelCase ( self : str ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase: Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=UpperCAmelCase , )
__lowerCAmelCase: Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
__lowerCAmelCase: List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase: Optional[int] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
__lowerCAmelCase: List[str] = ClapTextModelWithProjection(UpperCAmelCase )
__lowerCAmelCase: Any = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
__lowerCAmelCase: int = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCAmelCase , )
__lowerCAmelCase: Union[str, Any] = SpeechTaHifiGan(UpperCAmelCase )
__lowerCAmelCase: List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int]=0 ) -> Dict:
if str(UpperCAmelCase ).startswith('mps' ):
__lowerCAmelCase: Optional[Any] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCAmelCase: str = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCAmelCase: int = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: List[Any] = self.get_dummy_components()
__lowerCAmelCase: Optional[Any] = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = audioldm_pipe(**UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase ) == 2_5_6
__lowerCAmelCase: Any = audio[:1_0]
__lowerCAmelCase: Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase: Any = self.get_dummy_components()
__lowerCAmelCase: str = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = audioldm_pipe.to(UpperCAmelCase )
__lowerCAmelCase: List[Any] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: Any = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: str = 3 * [inputs['prompt']]
# forward
__lowerCAmelCase: Optional[Any] = audioldm_pipe(**UpperCAmelCase )
__lowerCAmelCase: List[str] = output.audios[0]
__lowerCAmelCase: Optional[int] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: int = 3 * [inputs.pop('prompt' )]
__lowerCAmelCase: Union[str, Any] = audioldm_pipe.tokenizer(
UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase , return_tensors='pt' , )
__lowerCAmelCase: Dict = text_inputs['input_ids'].to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = audioldm_pipe.text_encoder(
UpperCAmelCase , )
__lowerCAmelCase: str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__lowerCAmelCase: Dict = F.normalize(UpperCAmelCase , dim=-1 )
__lowerCAmelCase: Tuple = prompt_embeds
# forward
__lowerCAmelCase: Tuple = audioldm_pipe(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCAmelCase ( self : List[str] ) -> int:
__lowerCAmelCase: Union[str, Any] = self.get_dummy_components()
__lowerCAmelCase: Dict = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: str = audioldm_pipe.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: List[Any] = 3 * ['this is a negative prompt']
__lowerCAmelCase: List[Any] = negative_prompt
__lowerCAmelCase: Optional[int] = 3 * [inputs['prompt']]
# forward
__lowerCAmelCase: int = audioldm_pipe(**UpperCAmelCase )
__lowerCAmelCase: int = output.audios[0]
__lowerCAmelCase: str = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = 3 * [inputs.pop('prompt' )]
__lowerCAmelCase: Union[str, Any] = []
for p in [prompt, negative_prompt]:
__lowerCAmelCase: str = audioldm_pipe.tokenizer(
UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase , return_tensors='pt' , )
__lowerCAmelCase: Optional[Any] = text_inputs['input_ids'].to(UpperCAmelCase )
__lowerCAmelCase: Tuple = audioldm_pipe.text_encoder(
UpperCAmelCase , )
__lowerCAmelCase: List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__lowerCAmelCase: Any = F.normalize(UpperCAmelCase , dim=-1 )
embeds.append(UpperCAmelCase )
__lowerCAmelCase , __lowerCAmelCase: str = embeds
# forward
__lowerCAmelCase: Union[str, Any] = audioldm_pipe(**UpperCAmelCase )
__lowerCAmelCase: List[str] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: str = self.get_dummy_components()
__lowerCAmelCase: Tuple = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
__lowerCAmelCase: List[str] = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: Any = 'egg cracking'
__lowerCAmelCase: str = audioldm_pipe(**UpperCAmelCase , negative_prompt=UpperCAmelCase )
__lowerCAmelCase: Tuple = output.audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase ) == 2_5_6
__lowerCAmelCase: List[Any] = audio[:1_0]
__lowerCAmelCase: Optional[Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase: Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: List[str] = self.get_dummy_components()
__lowerCAmelCase: Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: List[Any] = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
__lowerCAmelCase: List[str] = audioldm_pipe(UpperCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__lowerCAmelCase: List[Any] = 2
__lowerCAmelCase: Optional[int] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
__lowerCAmelCase: Dict = 2
__lowerCAmelCase: Optional[int] = audioldm_pipe(UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=UpperCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
__lowerCAmelCase: List[Any] = 2
__lowerCAmelCase: Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCAmelCase ( self : List[Any] ) -> str:
__lowerCAmelCase: Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: Dict = self.get_dummy_components()
__lowerCAmelCase: Any = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: Any = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: Dict = audioldm_pipe.vocoder.config.sampling_rate
__lowerCAmelCase: List[str] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCAmelCase: Dict = audioldm_pipe(audio_length_in_s=0.016 , **UpperCAmelCase )
__lowerCAmelCase: int = output.audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase ) / vocoder_sampling_rate == 0.016
__lowerCAmelCase: List[Any] = audioldm_pipe(audio_length_in_s=0.032 , **UpperCAmelCase )
__lowerCAmelCase: Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase ) / vocoder_sampling_rate == 0.032
def UpperCAmelCase ( self : str ) -> str:
__lowerCAmelCase: int = self.get_dummy_components()
__lowerCAmelCase: int = AudioLDMPipeline(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = ['hey']
__lowerCAmelCase: Optional[int] = audioldm_pipe(UpperCAmelCase , num_inference_steps=1 )
__lowerCAmelCase: List[str] = output.audios.shape
assert audio_shape == (1, 2_5_6)
__lowerCAmelCase: str = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__lowerCAmelCase: int = SpeechTaHifiGan(UpperCAmelCase ).to(UpperCAmelCase )
__lowerCAmelCase: Dict = audioldm_pipe(UpperCAmelCase , num_inference_steps=1 )
__lowerCAmelCase: List[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase )
@slow
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Tuple ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any="cpu" , UpperCAmelCase : List[str]=torch.floataa , UpperCAmelCase : Tuple=0 ) -> int:
__lowerCAmelCase: Union[str, Any] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCAmelCase: List[str] = np.random.RandomState(UpperCAmelCase ).standard_normal((1, 8, 1_2_8, 1_6) )
__lowerCAmelCase: Tuple = torch.from_numpy(UpperCAmelCase ).to(device=UpperCAmelCase , dtype=UpperCAmelCase )
__lowerCAmelCase: Tuple = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCAmelCase ( self : Dict ) -> Dict:
__lowerCAmelCase: Optional[Any] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
__lowerCAmelCase: Any = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: str = self.get_inputs(UpperCAmelCase )
__lowerCAmelCase: List[Any] = 2_5
__lowerCAmelCase: List[str] = audioldm_pipe(**UpperCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase ) == 8_1_9_2_0
__lowerCAmelCase: Optional[int] = audio[7_7_2_3_0:7_7_2_4_0]
__lowerCAmelCase: int = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
__lowerCAmelCase: str = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase: Tuple = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
__lowerCAmelCase: Tuple = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__lowerCAmelCase: List[Any] = audioldm_pipe.to(UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCAmelCase: Dict = self.get_inputs(UpperCAmelCase )
__lowerCAmelCase: str = audioldm_pipe(**UpperCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase ) == 8_1_9_2_0
__lowerCAmelCase: str = audio[2_7_7_8_0:2_7_7_9_0]
__lowerCAmelCase: List[Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
__lowerCAmelCase: Optional[int] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
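
Outside the test harness, the pipeline is driven the same way. A minimal sketch using the checkpoint id from the slow tests above; the step count here is illustrative rather than the tests' value:

import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,                      # the slow test uses 25
    generator=torch.Generator().manual_seed(0),  # reproducible latents
).audios[0]
print(audio.shape)  # 1-D waveform sampled at pipe.vocoder.config.sampling_rate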
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str=1_3 , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : str=True , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=9_9 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=3_2 , UpperCAmelCase : int=5 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=5_1_2 , UpperCAmelCase : str=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict="last" , UpperCAmelCase : int=True , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=0 , ) -> Dict:
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Tuple = seq_length
__lowerCAmelCase: Tuple = is_training
__lowerCAmelCase: Optional[Any] = use_input_lengths
__lowerCAmelCase: List[str] = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: int = gelu_activation
__lowerCAmelCase: Optional[int] = sinusoidal_embeddings
__lowerCAmelCase: Tuple = causal
__lowerCAmelCase: Optional[Any] = asm
__lowerCAmelCase: int = n_langs
__lowerCAmelCase: Tuple = vocab_size
__lowerCAmelCase: List[Any] = n_special
__lowerCAmelCase: List[Any] = hidden_size
__lowerCAmelCase: Union[str, Any] = num_hidden_layers
__lowerCAmelCase: Dict = num_attention_heads
__lowerCAmelCase: int = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: List[str] = type_sequence_label_size
__lowerCAmelCase: str = initializer_range
__lowerCAmelCase: List[str] = num_labels
__lowerCAmelCase: List[str] = num_choices
__lowerCAmelCase: Optional[int] = summary_type
__lowerCAmelCase: Any = use_proj
__lowerCAmelCase: Optional[Any] = scope
__lowerCAmelCase: Dict = bos_token_id
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Any = None
if self.use_input_lengths:
__lowerCAmelCase: Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase: str = None
if self.use_token_type_ids:
__lowerCAmelCase: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase: int = None
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Optional[int] = None
if self.use_labels:
__lowerCAmelCase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase: str = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase: Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str] , ) -> Optional[int]:
__lowerCAmelCase: List[str] = XLMModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Any = model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , langs=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , ) -> int:
__lowerCAmelCase: str = XLMWithLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
__lowerCAmelCase: Dict = XLMForQuestionAnsweringSimple(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: str = model(UpperCAmelCase )
__lowerCAmelCase: List[str] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase: Union[str, Any] = XLMForQuestionAnswering(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[str] = model(UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
__lowerCAmelCase: Any = model(
UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
((__lowerCAmelCase) , ): List[str] = result_with_labels.to_tuple()
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
((__lowerCAmelCase) , ): List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , ) -> List[Any]:
__lowerCAmelCase: Optional[Any] = XLMForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = model(UpperCAmelCase )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , ) -> List[Any]:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: Tuple = XLMForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , ) -> Union[str, Any]:
__lowerCAmelCase: List[Any] = self.num_choices
__lowerCAmelCase: Optional[Any] = XLMForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCAmelCase: List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase: Any = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self : Tuple ) -> int:
__lowerCAmelCase: Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
): Union[str, Any] = config_and_inputs
__lowerCAmelCase: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowercase : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : str ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False ) -> Dict:
__lowerCAmelCase: Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowerCAmelCase: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
return inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: int = XLMModelTester(self )
__lowerCAmelCase: Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Dict=1 ) -> Dict:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: int = min_length + idx + 1
__lowerCAmelCase: Union[str, Any] = min_length + idx + 1
__lowerCAmelCase: Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=1 ) -> Union[str, Any]:
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(
[isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
# adds PAD dummy token
__lowerCAmelCase: Any = min_length + idx + 1
__lowerCAmelCase: str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
pass
@slow
def UpperCAmelCase ( self : int ) -> Tuple:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: List[Any] = XLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(UpperCAmelCase )
__lowerCAmelCase: Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
__lowerCAmelCase: Union[str, Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowerCAmelCase: str = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
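
The slow test above reduces to greedy decoding from the pretrained checkpoint; the same call in isolation:

import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]])  # "the president", per the test
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(output_ids[0].tolist())  # the test expects the two tokens to repeat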
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
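
With the _LazyModule registered in sys.modules, attribute access triggers the real import; a minimal sketch (assumes the optional torch backend is installed for the model class):

from transformers import XGLMConfig, XGLMForCausalLM  # resolved lazily on first access

config = XGLMConfig()
print(config.model_type)  # "xglm"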
| 51 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _a ( SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : int=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE )
@dataclass
class __magic_name__ :
A: str = field(
metadata={"help": "The csv file to plot."} , )
A: bool = field(
default=__lowerCAmelCase , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
A: bool = field(
default=__lowerCAmelCase , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
A: bool = field(
default=__lowerCAmelCase , metadata={"help": "Disable logarithmic scale when plotting"} , )
A: bool = field(
default=__lowerCAmelCase , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
A: Optional[str] = field(
default=__lowerCAmelCase , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
A: Optional[List[str]] = list_field(
default=__lowerCAmelCase , metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def _a ( SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
try:
int(SCREAMING_SNAKE_CASE )
return True
except ValueError:
return False
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
try:
float(SCREAMING_SNAKE_CASE )
return True
except ValueError:
return False
class __magic_name__ :
def __init__( self : Any , lowerCamelCase__ : Dict ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : int = args
UpperCamelCase__ : Any = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
UpperCamelCase__ : Union[str, Any] = csv.DictReader(lowerCamelCase__ )
for row in reader:
UpperCamelCase__ : Union[str, Any] = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
UpperCamelCase__ : Any = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
UpperCamelCase__ : Any = float(row['''result'''] )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = plt.subplots()
UpperCamelCase__ : Dict = '''Time usage''' if self.args.is_time else '''Memory usage'''
UpperCamelCase__ : int = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
UpperCamelCase__ : Tuple = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
UpperCamelCase__ : Tuple = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
UpperCamelCase__ : Dict = self.result_dict[model_name]['''result''']
((UpperCamelCase__) , (UpperCamelCase__)) : int = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCamelCase__ : Optional[int] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCamelCase__ : Optional[Any] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCamelCase__ , )
else:
UpperCamelCase__ : Tuple = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((UpperCamelCase__) , (UpperCamelCase__)) : str = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
UpperCamelCase__ : Optional[Any] = np.asarray(lowerCamelCase__ , lowerCamelCase__ )[: len(lowerCamelCase__ )]
plt.scatter(
lowerCamelCase__ , lowerCamelCase__ , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
plt.plot(lowerCamelCase__ , lowerCamelCase__ , '''--''' )
title_str += F" {label_model_name} vs."
UpperCamelCase__ : Optional[Any] = title_str[:-4]
UpperCamelCase__ : List[Any] = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(lowerCamelCase__ )
plt.xlabel(lowerCamelCase__ )
plt.ylabel(lowerCamelCase__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = HfArgumentParser(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = parser.parse_args_into_dataclasses()[0]
UpperCamelCase__ : Dict = Plot(args=SCREAMING_SNAKE_CASE )
plot.plot()
if __name__ == "__main__":
main()
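
A sketch of driving the plot programmatically instead of via the CLI. The dataclass name PlotArguments is an assumption for the argument container above (only Plot appears at a call site), and both file names are illustrative; the CSV must carry the model, batch_size, sequence_length and result columns read in __init__:

args = PlotArguments(csv_file="inference_results.csv", figure_png_file="speed.png")
Plot(args=args).plot()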
| 51 | 1 |