"""CANINE model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration class for a CANINE model; defaults mirror google/canine-s."""

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
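
# A minimal usage sketch for the configuration above (hedged: it assumes a
# `transformers` install that exposes CanineConfig and CanineModel at the
# package root, as current releases do):
#
# from transformers import CanineConfig, CanineModel
#
# config = CanineConfig(num_hash_buckets=8192)  # override one character-level default
# model = CanineModel(config)                   # randomly initialized CANINE encoder
# print(config.model_type, model.num_parameters())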
class CircularQueue:
    """A fixed-capacity circular (ring-buffer) queue."""

    def __init__(self, n: int) -> None:
        self.n = n  # maximum capacity
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0  # index one past the last element
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Insert an element at the rear; raise when the queue is full."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raise on underflow."""
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
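
# A quick driver sketch for the ring buffer above (hypothetical test code,
# not part of the original module):
queue = CircularQueue(3)
queue.enqueue("a").enqueue("b")
assert len(queue) == 2 and queue.first() == "a"
assert queue.dequeue() == "a"
assert queue.dequeue() == "b" and queue.is_empty()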
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
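
# A small illustration of the cache-naming convention the tests above assert
# (the URL is the mock one from the tests; `hash_url_to_filename` is the real
# helper from `datasets`, and the layout comment reflects use_etag=False):
from datasets.utils.file_utils import hash_url_to_filename

cached_name = hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None)
# The payload lands at <cache_dir>/downloads/<cached_name>, next to a sibling
# "<cached_name>.json" holding {"url": ..., "etag": None}.
print(cached_name)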
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
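
# The same pipeline also runs without a GPU; a minimal sketch (hedged: this
# downloads a large checkpoint, and "CPUExecutionProvider" is onnxruntime's
# standard CPU provider):
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy

pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
)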
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from PIL import Image

    from transformers import Mask2FormerImageProcessor


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
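
# A short post-processing sketch complementing the integration tests above
# (hedged: `post_process_semantic_segmentation` is the documented Mask2Former
# image-processor API, and the checkpoint name mirrors the tests):
import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# One (H, W) label map per input image:
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)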
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
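
# A minimal export sketch built on the helpers tested above (hedged: the output
# path is illustrative, opset 12 mirrors the tests, and the target folder should
# not already contain files, since `convert` aborts on a non-empty directory):
from pathlib import Path

from transformers.convert_graph_to_onnx import convert

convert("pt", "bert-base-cased", Path("onnx/bert-base-cased.onnx"), 12, None)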
# Imports
import numpy as np


class IndexCalculation:
    """Compute vegetation indices from per-band image matrices (numpy arrays)."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update the band matrices, then dispatch to the index named by `index`."""
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-normalized difference vegetation index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Normalized difference using the red-edge band in place of NIR."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        """Adjusted transformed soil-adjusted vegetation index."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-wide dynamic range vegetation index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll index - green."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll index - red edge."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected transformed vegetation index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green difference vegetation index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced vegetation index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global environment monitoring index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green optimized soil-adjusted vegetation index."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green soil-adjusted vegetation index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal vegetation index (requires intercept b and slope a)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared percentage vegetation index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio vegetation index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified ratio vegetation index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified soil-adjusted vegetation index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized green-red difference index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple NIR/red ratio (listed here under DVI)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed vegetation index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized difference red edge."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
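
# A small usage sketch for the class above (the band values are synthetic and
# purely illustrative; NDVI and RVI only need the red and NIR matrices):
import numpy as np

red = np.array([[50.0, 60.0], [55.0, 65.0]])
nir = np.array([[200.0, 210.0], [190.0, 220.0]])

indices = IndexCalculation(red=red, nir=nir)
print(indices.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)
print(indices.calculation("RVI"))   # elementwise nir / red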
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
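
# A usage sketch for the pipeline above (hedged: the CLIP checkpoint and image
# URL are illustrative; any CLIP-style zero-shot model works):
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of a cat", "a photo of a dog"],
)
print(preds)  # [{"score": ..., "label": "a photo of a cat"}, ...]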
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = 'pytorch_model.bin'
@dataclasses.dataclass
class _A :
_UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class _A :
_UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
_UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''The name of the task to train on.'''} , )
_UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class _A :
_UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
_UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
} , )
_UpperCamelCase : Optional[int] = dataclasses.field(
default=1_0 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
_UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
_UpperCamelCase : Optional[bool] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
_UpperCamelCase : Optional[bool] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
_UpperCamelCase : Optional[bool] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
_UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
_UpperCamelCase : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
_UpperCamelCase : Optional[int] = dataclasses.field(
default=_lowerCamelCase , metadata={'''help''': '''Random seed for initialization.'''} , )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowercase : Union[str, Any] = dataset.filter(lambda __magic_name__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowercase : int = int(eval_result * len(__magic_name__ ) )
print(__magic_name__ )
lowercase : List[Any] = dataset.sort('''probability''' , reverse=__magic_name__ )
lowercase : Optional[int] = dataset.select(range(__magic_name__ ) )
lowercase : Any = dataset.remove_columns(['''label''', '''probability'''] )
lowercase : List[Any] = dataset.rename_column('''prediction''' , '''label''' )
lowercase : List[str] = dataset.map(lambda __magic_name__ : {"label": idalabel[example["label"]]} )
lowercase : Union[str, Any] = dataset.shuffle(seed=args.seed )
lowercase : Any = os.path.join(__magic_name__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__magic_name__ , index=__magic_name__ )
else:
dataset.to_json(__magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowercase : Optional[Any] = STModelArguments(model_name_or_path=__magic_name__ )
lowercase : Optional[Any] = STDataArguments(train_file=__magic_name__ , infer_file=__magic_name__ )
lowercase : Tuple = STTrainingArguments(output_dir=__magic_name__ )
lowercase : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__magic_name__ ).items():
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
for key, value in kwargs.items():
if hasattr(__magic_name__ , __magic_name__ ):
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
# Sanity checks
lowercase : Optional[int] = {}
lowercase : Dict = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowercase : Tuple = args.train_file
lowercase : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowercase : Union[str, Any] = args.eval_file
for key in data_files:
lowercase : Dict = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
lowercase : Optional[Any] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
lowercase : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
lowercase : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__magic_name__ )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
accelerator.wait_for_everyone()
lowercase : Any = None
lowercase : Optional[int] = None
lowercase : str = 0
lowercase : Optional[int] = False
# Show the progress bar
lowercase : Optional[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowercase : Optional[int] = data_dir_format(__magic_name__ )
assert os.path.exists(__magic_name__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowercase : Union[str, Any] = os.path.join(__magic_name__ , '''stage-1''' )
lowercase : List[Any] = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__magic_name__ , __magic_name__ ):
arguments_dict.update({key: value} )
lowercase : Dict = os.path.join(__magic_name__ , '''best-checkpoint''' , __magic_name__ )
if os.path.exists(__magic_name__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , __magic_name__ , __magic_name__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , __magic_name__ )
finetune(**__magic_name__ )
accelerator.wait_for_everyone()
assert os.path.exists(__magic_name__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , __magic_name__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowercase : List[str] = os.path.join(__magic_name__ , '''best-checkpoint''' )
lowercase : List[str] = os.path.join(__magic_name__ , '''stage-2''' )
# Update arguments_dict
lowercase : Dict = model_path
lowercase : int = data_files['''train''']
lowercase : Dict = current_output_dir
lowercase : Optional[Any] = os.path.join(__magic_name__ , '''best-checkpoint''' , __magic_name__ )
if os.path.exists(__magic_name__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , __magic_name__ , __magic_name__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , __magic_name__ )
finetune(**__magic_name__ )
accelerator.wait_for_everyone()
assert os.path.exists(__magic_name__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , __magic_name__ )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , '''best-checkpoint''' ) )
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir , '''eval_results_best-checkpoint.json''' )
        test_results_file = os.path.join(current_output_dir , '''test_results_best-checkpoint.json''' )
        assert os.path.exists(eval_results_file )
        with open(eval_results_file , '''r''' ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir , '''infer_output_best-checkpoint.csv''' )
        assert os.path.exists(infer_output_file )
# Loading the dataset from local csv or json files.
lowercase : List[Any] = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
lowercase : str = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
shutil.copy(__magic_name__ , os.path.join(__magic_name__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__magic_name__ ):
shutil.copy(__magic_name__ , os.path.join(__magic_name__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
accelerator.wait_for_everyone()
        data_files['''train_pseudo'''] = os.path.join(next_data_dir , F"""train_pseudo.{args.data_file_extension}""" )
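        # The pseudo-labeled file registered here is what stage 1 trains on in the
        # next iteration (it is read back through data_files["train_pseudo"] above).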
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                    if early_stopping_patience_counter >= args.early_stopping_patience:
                        should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
        logger.info('''Best iteration: %d''' , best_iteration )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , best_eval_result )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__magic_name__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(__magic_name__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , eval_result )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__magic_name__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__magic_name__ , '''eval_results_best-iteration.json''' ) , ) | 308 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , seq_length=1_024 , num_of_sequences=1_024 , chars_per_token=3.6 ):
        """Wraps a streaming dataset and yields token windows of exactly `seq_length`."""
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
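# A minimal usage sketch (illustrative, not part of the original script): the class
# above concatenates tokenized examples with the BOS token in between and yields
# fixed-size windows, so every element has exactly `seq_length` tokens, e.g.:
#   ds = ConstantLengthDataset(tokenizer, streaming_dataset, seq_length=1024)
#   sample = next(iter(ds))  # torch.LongTensor of shape (1024,)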
def create_dataloader( args ):
    '''Builds the evaluation dataloader from a streaming dataset.'''
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    '''Returns (mean loss, perplexity) over the evaluation dataloader.'''
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
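# Perplexity is exp(mean loss); the OverflowError guard above falls back to
# float("inf") rather than crashing on an exploding loss.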
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
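    # Depending on the inputs, __call__ above returns text features only, image
    # features only, or both merged into a single BatchEncoding.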
    def batch_decode( self , *args , **kwargs ):
        """Forwards everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """Forwards everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 138 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
    def setUpClass( cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
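    # Attend-and-Excite specifics: `token_indices` marks the prompt tokens ("cat",
    # "frog") whose cross-attention the pipeline maximizes; `max_iter_to_alter` and
    # `thresholds` bound how many denoising steps update the latents.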
    def test_inference( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def __magic_name__ ( self ) -> List[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __magic_name__ ( self ) -> Union[str, Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self ) -> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __magic_name__ ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __magic_name__ ( self ) -> Any:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __magic_name__ ( self ) -> Dict:
super().test_save_load_local(expected_max_difference=5e-4 )
def __magic_name__ ( self ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineSlowTests( unittest.TestCase ):
@classmethod
    def setUpClass( cls ):
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self ):
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to("""cuda""" )
        prompt = """a painting of an elephant with glasses"""
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
        expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 138 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
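# With this lazy-module pattern, importing the package stays cheap: _LazyModule
# only materializes the torch-backed submodules on first attribute access.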
| 99 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
class snake_case_ (BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_rescale : bool = True , do_normalize : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , crop_size : Dict[str, int] = None , do_center_crop : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 2_24, 'width': 2_24}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
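# A usage sketch (illustrative; assumes BaseImageProcessor routes __call__ to preprocess):
#   processor = snake_case_()
#   batch = processor(images=pil_image, return_tensors="np")
#   batch["pixel_values"]  # shape (1, 3, 224, 224): resized, center-cropped,
#                          # rescaled by 1/255 and normalized to IMAGENET_DEFAULT stats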
| 240 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder( nn.Module ):
    def __init__( self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , x ):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset( Dataset ):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
    def __len__( self ):
        return len(self.data )
    def __getitem__( self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["""label"""]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies( self ):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["""label"""] )
        return label_freqs
def collate_fn( batch ):
    """Pad sentences, build the attention mask, and stack image/label tensors."""
    lens = [len(row["""sentence"""] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["""sentence"""]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["""image"""] for row in batch] )
    tgt_tensor = torch.stack([row["""label"""] for row in batch] )
    img_start_token = torch.stack([row["""image_start_token"""] for row in batch] )
    img_end_token = torch.stack([row["""image_end_token"""] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
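# collate_fn pads every sentence to the longest in the batch, builds a matching 0/1
# attention mask, and stacks images, labels and image start/end tokens into tensors.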
def get_mmimdb_labels():
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
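# Note: the Normalize mean/std above are dataset-specific statistics, not the usual
# ImageNet values (0.485/0.456/0.406 and 0.229/0.224/0.225).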
| 361 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model( self ):
        model = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
        features = {
            """input_ids""": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 146 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : str = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = '''mobilenet_v1'''
    def __init__( self , num_channels=3 , image_size=2_2_4 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
    def atol_for_validation( self ) -> float:
return 1E-4
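    # depth_multiplier scales every layer's channel count (floored at min_depth);
    # that is what distinguishes checkpoints like mobilenet_v1_1.0_224 vs 0.75_192.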
| 60 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 7 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
return config, pixel_values
    def get_config( self ):
        """simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
    def test_model( self ):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
    def test_for_image_classification( self ):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
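    # The JIT test above asserts that tracing the forward pass with jax.jit yields
    # the same output shapes as eager execution under jax.disable_jit().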
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''')
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 7 | 1 |
import functools
def edit_distance(worda: str, wordb: str) -> int:
    """Levenshtein distance between worda and wordb, via memoized recursion."""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all remaining of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
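# Example (illustrative): the distance between "kitten" and "sitting" is 3
# (substitute k->s, substitute e->i, append g):
#   edit_distance("kitten", "sitting")  # -> 3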
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Jaccard similarity: |A intersection B| / |A union B|. With alternative_union=True
    the denominator is len(A) + len(B) instead of the true union size.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
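# With alternative_union=True the denominator is len(A) + len(B), so shared elements
# are counted twice and two identical sets score 0.5 instead of 1.0.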
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 127 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class lowercase_ ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''bert-generation'''
    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 111 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Gnome sort: walk the list, swapping out-of-order neighbours as we go."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
| 111 | 1 |
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    '''Encode bytes as an uppercase base16 (hex) string.'''
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    '''Decode an uppercase base16 string back into bytes.'''
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__snake_case ) , 2 ) )
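# Round-trip example (illustrative):
#   base16_encode(b"Hello")      # -> '48656C6C6F'
#   base16_decode('48656C6C6F')  # -> b'Hello'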
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
pass
def gen(shards: List[str]):
    '''Yields NUM_ITEMS_PER_SHARD items per shard.'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    '''Checks that split_dataset_by_node distributes examples evenly across ranks.'''
    rank = int(os.environ['RANK'] )
    world_size = int(os.environ['WORLD_SIZE'] )
    parser = ArgumentParser()
    parser.add_argument('--streaming' , type=bool )
    parser.add_argument('--local_rank' , type=int )
    parser.add_argument('--num_workers' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'shards': [f'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
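    # split_dataset_by_node gives every rank full_size // world_size examples, plus
    # one extra example for each of the first full_size % world_size ranks.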
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 220 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
a_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
a_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
a_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case ( datasets.Metric):
    def _info( self ) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )} | 366 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 163 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
lowercase__ : List[str] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowercase__ : Optional[int] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 338 | import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
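# Illustrative usage sketch (added; not part of the original module): the Schur
# complement S = C - B^T A^{-1} B satisfies det([[A, B], [B^T, C]]) = det(A) * det(S),
# which is exactly the identity the unit tests below verify.
def _schur_complement_demo() -> None:
    a = np.array([[1.0, 2.0], [2.0, 5.0]])
    b = np.array([[1.0], [0.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c)
    block = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(block), np.linalg.det(a) * np.linalg.det(s))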
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 338 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
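# Illustrative usage sketch (added; not part of the original file): compose a
# config from explicit backbone/decoder configs instead of the defaults.
#
#   backbone = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   decoder = DetrConfig()
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)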
| 360 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000) -> bool:
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
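# Quick sanity check (illustrative addition, not part of the original script):
# the test is probabilistic, but with the default 1000 random witnesses a
# misclassification is astronomically unlikely for small inputs.
assert is_prime_big(97) and not is_prime_big(91)  # 91 = 7 * 13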
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 160 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
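# Usage note (added): once installed as a console script, this entry point backs
# shell invocations such as `accelerate config`, `accelerate env`,
# `accelerate launch <script.py>`, `accelerate test`, and the TPU helper
# registered above.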
| 56 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
    'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
    'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 56 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """simple docstring"""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
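# Illustrative note (added): `get_tfds` expects CSV files with a header row
# naming the feature columns; `label_column_id` is the positional index of the
# label column among those features, e.g. for a file with columns
# `sentence,label` pass label_column_id=1.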
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""")

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        f'''16-bits training: {training_args.fp16}''')
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="""text-classification""",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(""".bin""" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")

        with open(output_eval_file, """w""") as writer:
            logger.info("""***** Eval results *****""")
            for key, value in result.items():
                logger.info(f'''  {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')

            results.update(result)

    return results
if __name__ == "__main__":
main() | 365 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 8 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '''
                f'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}''')
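# Illustrative usage sketch (added; values are arbitrary examples): a config
# that stretches the RoPE positions by 2x via linear scaling, which the
# validation above accepts.
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})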
| 60 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 60 | 1 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = 'T5Config'


class TFMTaModel(TFTaModel):
    """simple docstring"""

    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """simple docstring"""

    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    """simple docstring"""

    model_type = '''mt5'''
    config_class = MTaConfig
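# Note (added for clarity): MT5 reuses the T5 computation graph unchanged; only
# `model_type` and `config_class` differ, so mT5 checkpoints load through the
# TF T5 modeling classes imported above.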
| 355 |
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via floating-point sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
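# Illustrative cross-check (added; not part of the original module): both
# implementations should agree on small inputs.
def _perfect_square_demo() -> None:
    for value in range(100):
        assert perfect_square(value) == perfect_square_binary_search(value)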
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """donut-swin"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ) -> int:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
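# Worked example (added for clarity): with the defaults above (embed_dim=96,
# four stages), the derived channel dimension is
# hidden_size = int(96 * 2 ** (4 - 1)) = 768.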
| 78 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''input_texts''': datasets.Value('''string'''),
                }
            ),
            reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = '''cuda'''
        else:
            device = '''cuda''' if torch.cuda.is_available() else '''cpu'''

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='''pt''',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings['''input_ids''']
        attn_masks = encodings['''attention_mask''']

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='''none''')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 102 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 146 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'andreasmadsen/efficient_mlm_m0.40': (
        'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''roberta-prelayernorm'''

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> List[str]:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
| 146 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''xlm-roberta-xl'''

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
| 100 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of ``n``."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of ``iterable`` is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first ``n`` consecutive integers that each have ``n`` distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Project Euler 47: first of ``n`` consecutive integers with ``n`` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
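# Reference values from the Project Euler 47 statement (for orientation, not in the original file):
# the first two consecutive integers with two distinct prime factors each are 14 and 15,
# and the first three with three each are 644, 645 and 646, so run(2)[0] == 14 and run(3)[0] == 644.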
| 238 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
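# End-to-end usage sketch for the processor under test (requires Hub access; not part of the test file):
#
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=pil_image, return_tensors="pt")
#     # -> dict with "input_ids", "attention_mask" and "pixel_values"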
| 357 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    # NOTE: the attribute name on the next line was lost in the source dump; a boolean test flag was set here.
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
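# The slow tests above exercise the intended two-stage flow (sketch, not part of the test file):
# run a base StableDiffusionPipeline with output_type="latent", then feed those latents as
# `image` to StableDiffusionLatentUpscalePipeline for a 2x upscale of the decoded output.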
| 201 | 0 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Check whether a series is arithmetic, e.g. [2, 4, 6] -> True."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series, e.g. [2, 4, 6] -> 4.0."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
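# Example results (illustrative, not part of the original file):
#     is_arithmetic_series([2, 4, 6])  -> True
#     is_arithmetic_series([2, 4, 7])  -> False
#     arithmetic_mean([2, 4, 6])       -> 4.0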
| 75 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
# convert to bit representations and back
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
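# Round-trip sanity sketch (illustrative, not part of the original file):
#
#     x = torch.rand(1, 3, 8, 8)
#     bits = decimal_to_bits(x)      # values in {-1, 1}, shape (1, 24, 8, 8)
#     x_rec = bits_to_decimal(bits)  # x quantized to 256 gray levels
#     assert torch.allclose(x_rec, (x * 255).int() / 255, atol=1e-6)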
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips "predicted x_0" to +/- bit_scale instead of +/- 1."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def a_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase="epsilon" ,_lowerCAmelCase=None ,_lowerCAmelCase = True ,) -> Union[DDPMSchedulerOutput, Tuple]:
__lowerCamelCase : Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCamelCase ,__lowerCamelCase : int = torch.split(_lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
__lowerCamelCase : Optional[Any] = None
# 1. compute alphas, betas
__lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
__lowerCamelCase : Optional[int] = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCamelCase : Any = 1 - alpha_prod_t
__lowerCamelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCamelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCamelCase : Any = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
__lowerCamelCase : Optional[Any] = self.bit_scale
if self.config.clip_sample:
__lowerCamelCase : Union[str, Any] = torch.clamp(_lowerCAmelCase ,-scale ,_lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCamelCase : Dict = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCamelCase : Optional[Any] = 0
if t > 0:
__lowerCamelCase : Any = torch.randn(
model_output.size() ,dtype=model_output.dtype ,layout=model_output.layout ,generator=_lowerCAmelCase ).to(model_output.device )
__lowerCamelCase : Optional[Any] = (self._get_variance(_lowerCAmelCase ,predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
__lowerCamelCase : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase ,pred_original_sample=_lowerCAmelCase )
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Route the scheduler's step() through the bit-aware step functions above.
        # NOTE: binding via __get__ (so the scheduler instance is passed as `self`) is a
        # reconstruction; the original wiring was lost in the source dump.
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
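# Usage sketch (assumes `unet` is a UNet trained on bit representations; identifiers illustrative):
#
#     scheduler = DDIMScheduler()
#     pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
#     image = pipe(height=256, width=256, num_inference_steps=50).images[0]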
| 208 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 5_1_2,
'google/realm-cc-news-pretrained-encoder': 5_1_2,
'google/realm-cc-news-pretrained-scorer': 5_1_2,
'google/realm-cc-news-pretrained-openqa': 5_1_2,
'google/realm-orqa-nq-openqa': 5_1_2,
'google/realm-orqa-nq-reader': 5_1_2,
'google/realm-orqa-wq-openqa': 5_1_2,
'google/realm-orqa-wq-reader': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of text or text pairs. Similar to the regular __call__ method, but:

            1. Handles an additional num_candidate axis: (batch_size, num_candidates, text).
            2. Always pads the sequences to *max_length*.
            3. *max_length* must be specified in order to stack packs of candidates into a batch.
        """
        # Always use a fixed sequence length to encode, in order to stack candidates into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
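# Usage sketch for batch_encode_candidates (requires Hub access; not part of the original file):
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     batch = tokenizer.batch_encode_candidates(
#         [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#     )
#     batch["input_ids"].shape  # -> (1, 2, 10)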
| 368 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
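# Usage sketch (requires the "microsoft/speecht5_asr" checkpoint; not part of the original file):
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tokenizer("hello world").input_ids  # character-level SentencePiece ids, ending with </s>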
| 183 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):  # NOTE: class name reconstructed; the original was garbled
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 223 |
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
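# Expected demo output, computed by hand (each equation reads sum(x) + x_i = 4 + i, so sum(x) = 5):
#     [-1.0, 0.0, 1.0, 2.0, 3.0]
#     [0.5]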
| 72 | 0 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of a list, i.e. the value(s) occurring most often, sorted ascending."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
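# Example results (illustrative, not part of the original file):
#     mode([2, 2, 3])        -> [2]
#     mode([1, 2, 2, 3, 3])  -> [2, 3]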
| 361 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
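# For reference: a 4x4 board has exactly two solutions, so n_queens_solution(4) prints two
# boards followed by "2 solutions were found."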
| 11 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 512,
}
_UpperCAmelCase : List[Any] = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """Construct a CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
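# Usage sketch (requires the "camembert-base" checkpoint; not part of the original file):
#
#     tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     tokenizer.tokenize("J'aime le camembert !")  # -> SentencePiece subword pieces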
| 236 |
import os
import pytest
from attr import dataclass
_UpperCAmelCase : List[str] = "us-east-1" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
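# Usage sketch (fixture wiring is illustrative; not part of the original file): a test class opts in with
#
#     @pytest.mark.usefixtures("sm_env")
#     class ExampleSageMakerTest(unittest.TestCase):
#         framework = "pytorch"
#
# after which the fixture exposes the environment as `self.env`.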
| 236 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
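# Added usage sketch (illustrative, not part of the original module):
# overriding one field keeps the remaining defaults defined in __init__ above.
if __name__ == "__main__":
    demo_config = ViTConfig(image_size=384)
    assert demo_config.image_size == 384 and demo_config.patch_size == 16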
| 355 | import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 258 | 0 |
"""simple docstring"""
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
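    # Added worked example (illustrative): turning "kitten" into "sitting"
    # takes three edits (substitute k->s, substitute e->i, insert trailing g).
    assert min_distance_up_bottom("kitten", "sitting") == 3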
| 109 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
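    # Added worked example (illustrative): the classic Kadane test case.
    # The best subarray of [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] -> 6.
    assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6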
| 351 | """simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
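# Added counterpart sketch (an assumption -- the original file only contains
# the client): a minimal server that accepts one connection, reads the
# greeting, and streams a file back in 1 KiB chunks until EOF.
def demo_server(file_to_send: str = "file_to_send") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)  # consume the "Hello server!" greeting
    with open(file_to_send, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()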
| 126 | 0 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
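    # Added worked example (illustrative): the points below lie on y = x + 5,
    # so interpolating at x = 5 must reproduce 10 exactly.
    assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0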
| 267 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # The moment of each force is the cross product of its position vector with
    # the force; the system is in rotational equilibrium when the moments sum to ~0.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod() | 190 | 0 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Find the edges of a minimum cut that separates ``source`` from ``sink``."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
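    # Added sanity check (illustrative): run on a copy first, since mincut()
    # mutates its input graph in place while computing residual capacities.
    # For the classic CLRS example graph above, max-flow is 23 and the
    # saturated cut edges are (1, 3), (4, 3) and (4, 5).
    assert mincut([row[:] for row in test_graph], source=0, sink=5) == [(1, 3), (4, 3), (4, 5)]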
print(mincut(test_graph, source=0, sink=5))
| 309 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class DebugLauncherTester(unittest.TestCase):
    def test_can_launch_training_script(self):
        debug_launcher(test_script.main)

    def test_can_launch_ops_script(self):
        debug_launcher(test_ops.main)
| 309 | 1 |
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # we need to convert the comma-separated string 'arr' to a list
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
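    # Added non-interactive check (illustrative): for "1,2,3,-2,5" the best
    # contiguous sum is 1 + 2 + 3 - 2 + 5 = 9.
    assert SubArray("1,2,3,-2,5").solve_sub_array() == 9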
| 91 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 275 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 359 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 197 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 69 |
'''simple docstring'''
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
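    # Added sanity checks (illustrative):
    assert is_pangram("The quick brown fox jumps over the lazy dog")
    assert not is_pangram("My name is Unknown")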
| 37 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 359 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
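# Added usage sketch (illustrative): the map behaves like a regular dict,
# resizing its bucket array as items are inserted and deleted.
if __name__ == "__main__":
    hm = HashMap()
    hm["one"] = 1
    hm["two"] = 2
    del hm["one"]
    assert len(hm) == 1 and hm["two"] == 2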
| 279 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
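# Added usage sketch (illustrative): compose the composite config from its two
# sub-configs via the classmethod above; 640 is the default projection_dim.
if __name__ == "__main__":
    demo = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
    assert demo.projection_dim == 640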
| 259 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any="attention" ):
UpperCamelCase :str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
UpperCamelCase :Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCamelCase :Optional[int] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
UpperCamelCase :List[Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCamelCase :Union[str, Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
UpperCamelCase :Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCamelCase :str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
UpperCamelCase :str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=False ):
if split_mlp_wi:
UpperCamelCase :List[Any] = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
UpperCamelCase :int = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
UpperCamelCase :str = (wi_a, wi_a)
else:
UpperCamelCase :Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
UpperCamelCase :Optional[int] = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def _A ( SCREAMING_SNAKE_CASE__ : dict , *, SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : bool = False ):
UpperCamelCase :Tuple = traverse_util.flatten_dict(variables['''target'''] )
UpperCamelCase :List[Any] = {'''/'''.join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase :int = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase :int = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :str = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :Dict = k.T
UpperCamelCase :Optional[Any] = o.T
UpperCamelCase :int = q.T
UpperCamelCase :Any = v.T
# Block i, layer 1 (MLP).
UpperCamelCase :Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Any = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :List[Any] = wi[0].T
UpperCamelCase :Tuple = wi[1].T
else:
UpperCamelCase :Optional[Any] = wi.T
UpperCamelCase :Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :List[str] = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' ).T
UpperCamelCase :Optional[Any] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
UpperCamelCase :str = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''encoder''' ).T
UpperCamelCase :Any = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''self_attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :int = k.T
UpperCamelCase :Optional[int] = o.T
UpperCamelCase :Tuple = q.T
UpperCamelCase :List[str] = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase :str = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCamelCase :Tuple = layer_norm
UpperCamelCase :Optional[Any] = k.T
UpperCamelCase :List[str] = o.T
UpperCamelCase :List[str] = q.T
UpperCamelCase :str = v.T
# Block i, layer 2 (MLP).
UpperCamelCase :List[str] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Optional[int] = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :List[str] = wi[0].T
UpperCamelCase :str = wi[1].T
else:
UpperCamelCase :Dict = wi.T
UpperCamelCase :Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :Tuple = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' ).T
UpperCamelCase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Whether the model is an encoder-only model.""", default=False
    )
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 259 | 1 |
"""simple docstring"""
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 38 |
"""simple docstring"""
from math import sqrt
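# Amicable-number search (cf. Project Euler problem 21): n is amicable when
# sum_of_divisors(sum_of_divisors(n)) == n and sum_of_divisors(n) != n.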
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 38 | 1 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series up to the given number of terms."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 102 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 1 |
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
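# Repeated divmod by 16: each remainder indexes a digit in the table above, building the
# hexadecimal string from least to most significant digit.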
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a base-10 integer (possibly negative, possibly an int-valued float) to hexadecimal."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 330 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add an edge to the graph, storing endpoints in sorted order."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
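    # Prim's algorithm: grow a spanning tree from an arbitrary vertex by repeatedly adding
    # the cheapest edge that crosses the current tree boundary (the XOR test below).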
    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def SCREAMING_SNAKE_CASE__ ( snake_case_ = "p107_network.txt" ) -> int:
"""simple docstring"""
a = os.path.abspath(os.path.dirname(snake_case_ ) )
a = os.path.join(snake_case_, snake_case_ )
a = {}
a = 42
a = 42
a = 42
with open(snake_case_ ) as f:
a = f.read().strip().split('''\n''' )
a = [line.split(''',''' ) for line in data]
for edgea in range(1, len(snake_case_ ) ):
for edgea in range(snake_case_ ):
if adjaceny_matrix[edgea][edgea] != "-":
a = int(adjaceny_matrix[edgea][edgea] )
a = Graph(set(range(len(snake_case_ ) ) ), snake_case_ )
a = graph.prims_algorithm()
a = sum(graph.edges.values() )
a = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 330 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
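# Copies each tensor of a TF2 "token dropping" BERT checkpoint into the matching
# transformers BertForMaskedLM module, transposing kernel matrices along the way.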
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orig_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orig_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 99 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
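# The ESM vocabulary is a plain text file with one token per line; tokenization below is
# just str.split() over residue characters, so no merges or subword logic is needed.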
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
| 334 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
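# Each test probes an activation at large-negative, zero, and large-positive inputs:
# the first two should clamp to 0 and large positives should pass through unchanged.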
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20) | 190 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
) | 190 | 1 |
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 202 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
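# _LazyModule (bottom of file) defers importing torch/tokenizer submodules until an
# attribute is first accessed; the try/except blocks only register what is installed.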
_A : Optional[int] = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
_A : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 202 | 1 |
def capitalized_variants(txt: str) -> list:
    # Name chosen for clarity; the original function name was not preserved in this dump.
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 355 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
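# Fix the RNG seed so the old-vs-new model output comparison below is deterministic.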
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
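# Registry of suno/bark checkpoint files on the Hugging Face Hub; the "_small" variants
# share the repo but point at smaller weight files.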
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type: str, use_small=False) -> str:
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path: str, file_name: str) -> None:
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints store a single vocab_size instead of split input/output sizes
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def A__ ( lowerCamelCase , lowerCamelCase=False , lowerCamelCase="text" ) -> Any:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCamelCase_: Union[str, Any] = """cpu""" # do conversion on cpu
UpperCamelCase_: int = _get_ckpt_path(lowerCamelCase , use_small=lowerCamelCase )
UpperCamelCase_: Dict = _load_model(lowerCamelCase , lowerCamelCase , model_type=lowerCamelCase , use_small=lowerCamelCase )
# load bark initial model
UpperCamelCase_: List[Any] = _bark_load_model(lowerCamelCase , """cpu""" , model_type=lowerCamelCase , use_small=lowerCamelCase )
if model_type == "text":
UpperCamelCase_: Tuple = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowerCamelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
UpperCamelCase_: Optional[Any] = 5
UpperCamelCase_: List[str] = 10
if model_type in ["text", "coarse"]:
UpperCamelCase_: int = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
UpperCamelCase_: Tuple = bark_model(lowerCamelCase )[0]
UpperCamelCase_: Optional[Any] = model(lowerCamelCase )
# take last logits
UpperCamelCase_: Union[str, Any] = output_new_model_total.logits[:, [-1], :]
else:
UpperCamelCase_: Tuple = 3
UpperCamelCase_: List[Any] = 8
UpperCamelCase_: List[str] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCamelCase_: int = model(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Any = bark_model(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 223 | 0 |
'''simple docstring'''
from __future__ import annotations
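# Trial division: strip out each factor i while i * i <= n; any remainder above 1 is prime.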
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
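# Exercises TypedSequence's type/try_type casting rules and the Arrow/Parquet writers
# end to end (write, write_batch, write_table, write_row, keyed writes, images).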
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_image_type_skips_list_casting(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize('writer_batch_size', [None, 2, 10] )
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10] )
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize('raise_exception', [False, True] )
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True] )
def test_parquet_write_image(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 334 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
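# Bundles a CLIP image processor and tokenizer behind one callable, mirroring the
# model's dual text/image inputs.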
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 334 |
'''simple docstring'''
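# Goldberg-Tarjan push-relabel maximum flow, using the relabel-to-front selection rule.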
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
UpperCamelCase = [0]
UpperCamelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCamelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
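

# A minimal, self-contained cross-check of the push-relabel result above, using
# a plain BFS-based Edmonds-Karp max-flow over the same capacity matrix. This
# sketch is independent of the FlowNetwork plumbing and only verifies the
# expected answer for the small single-source / single-sink example.
from collections import deque


def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: flow is maximal
            return max_flow
        # find the bottleneck capacity along the path, then augment
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck


if __name__ == "__main__":
    # For the 4-vertex cycle above (0 -> 1 -> 2 -> 3) the bottleneck is
    # min(7, 6, 8) = 6, so both implementations should report 6.
    assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6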
| 334 | 1 |
"""Pinned dependency versions for this package (name -> version specifier)."""
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
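

# A minimal sketch of how a version table like `deps` is typically consumed:
# parse each pin with `packaging` and compare it against the installed
# distribution. `check_pin` is a hypothetical helper written for illustration,
# not the library's actual dependency checker.
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement


def check_pin(requirement: str) -> bool:
    """Return True if the installed package satisfies `requirement`."""
    req = Requirement(requirement)
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        return False
    # an empty specifier (e.g. a bare "Pillow") accepts any installed version
    return req.specifier.contains(installed, prereleases=True)


if __name__ == "__main__":
    for pin in ("numpy", "torch>=1.4"):
        print(pin, "->", check_pin(pin))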
| 93 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
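

# The integration test above interleaves state / action / return-to-go tokens
# and grows every tensor by one timestep per environment step. The helper below
# is a stripped-down, model-free sketch of that bookkeeping; `state_dim` and
# `act_dim` are illustrative values, not tied to any checkpoint.
def _grow_episode(states, actions, rewards, returns_to_go, timesteps, new_state, new_action, new_reward, step):
    actions = torch.cat([actions, new_action.reshape(1, 1, -1)], dim=1)
    rewards = torch.cat([rewards, torch.tensor(new_reward).reshape(1, 1)], dim=1)
    states = torch.cat([states, new_state.reshape(1, 1, -1)], dim=1)
    pred_return = returns_to_go[0, -1] - new_reward
    returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
    timesteps = torch.cat([timesteps, torch.full((1, 1), step + 1, dtype=torch.long)], dim=1)
    return states, actions, rewards, returns_to_go, timesteps


if __name__ == "__main__":
    state_dim, act_dim = 17, 6
    states = torch.randn(1, 1, state_dim)
    actions = torch.zeros(1, 0, act_dim)
    rewards = torch.zeros(1, 0)
    returns_to_go = torch.tensor(10.0).reshape(1, 1, 1)
    timesteps = torch.zeros(1, 1, dtype=torch.long)
    for step in range(2):
        states, actions, rewards, returns_to_go, timesteps = _grow_episode(
            states, actions, rewards, returns_to_go, timesteps,
            torch.randn(state_dim), torch.randn(act_dim), 1.0, step,
        )
    assert states.shape == (1, 3, state_dim) and actions.shape == (1, 2, act_dim)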
| 110 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
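

# A short usage sketch for the model exercised above: run the public
# "YituTech/conv-bert-base" checkpoint (the same one the integration test
# loads) on a sentence and inspect the hidden-state shape. This downloads the
# checkpoint, so treat it as an illustration rather than part of the test
# suite.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    convbert = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    encoded = tokenizer("ConvBERT mixes self-attention with dynamic convolution.", return_tensors="tf")
    hidden_states = convbert(encoded).last_hidden_state
    print(hidden_states.shape)  # (batch, sequence, 768) for this checkpoint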
| 290 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
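

# A small sketch of the special-token layout the two helpers above produce,
# using made-up token ids (101 for [CLS] and 102 for [SEP], purely for
# illustration): one sequence becomes "[CLS] A [SEP]", a pair becomes
# "[CLS] A [SEP] B [SEP]" with segment ids of 0 then 1.
def _pair_layout(ids_a, ids_b, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids


if __name__ == "__main__":
    ids, segments = _pair_layout([7, 8], [9])
    assert ids == [101, 7, 8, 102, 9, 102]
    assert segments == [0, 0, 0, 0, 1, 1]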
| 290 | 1 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
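

# The docstring examples above go through `datasets.load_metric`; the same
# normalization pipeline is easy to sanity-check in plain Python. This is a
# sketch of an equivalent computation, not the `datasets` API itself.
def _exact_match_rate(predictions, references, ignore_case=False, ignore_punctuation=False):
    def normalize(text):
        if ignore_case:
            text = text.lower()
        if ignore_punctuation:
            text = text.translate(str.maketrans("", "", string.punctuation))
        return text

    matches = sum(normalize(p) == normalize(r) for p, r in zip(predictions, references))
    return 100.0 * matches / len(predictions)


if __name__ == "__main__":
    assert _exact_match_rate(["cat?", "theater"], ["the cat", "theater"]) == 50.0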
| 283 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    r"""
    Constructs a BridgeTower processor which wraps a Roberta tokenizer and a BridgeTower image processor into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
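

# A usage sketch for the processor above. The checkpoint name is the standard
# BridgeTower base model on the Hub; it is assumed here for illustration and
# requires a download, so this is not part of the module proper.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import BridgeTowerProcessor

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    encoding = processor(image, "two cats sleeping on a couch", return_tensors="pt")
    # tokenizer fields plus pixel_values / pixel_mask from the image processor
    print(sorted(encoding.keys()))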
| 283 | 1 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash either a command-line string or the contents of a file with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
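

# A quick extra property check (beyond the single unit test above): compare
# this implementation against hashlib on a few inputs, including the empty
# message and a multi-block (> 64 byte) message.
if __name__ == "__main__":
    import hashlib

    for message in (b"", b"abc", b"a" * 200):
        assert SHA256(message).hash == hashlib.sha256(message).hexdigest(), message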
| 368 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
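

# A worked example of what the defaults above imply for masking: with a
# 224x224 image, 16x16 patches, and mask_ratio=0.75, the encoder only ever
# sees a quarter of the 196 patch tokens.
if __name__ == "__main__":
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    num_masked = int(config.mask_ratio * num_patches)
    print(num_patches, num_masked, num_patches - num_masked)  # 196 147 49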
| 333 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __UpperCamelCase ( lowercase__ : str, lowercase__ : List[Any]=0.999, lowercase__ : List[Any]="cosine", ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__lowercase =[]
for i in range(lowercase__ ):
__lowercase =i / num_diffusion_timesteps
__lowercase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ), lowercase__ ) )
return torch.tensor(lowercase__, dtype=torch.floataa )
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
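

# A small numeric sketch of how the three `timestep_spacing` modes above
# discretize the training range: for 1000 training timesteps and 4 inference
# steps, "linspace" hits both endpoints, "leading" starts at `steps_offset`,
# and "trailing" ends flush with the last training step.
if __name__ == "__main__":
    num_train_timesteps, num_inference_steps = 1000, 4

    linspace = np.linspace(0, num_train_timesteps - 1, num_inference_steps)[::-1]
    step_ratio = num_train_timesteps // num_inference_steps
    leading = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1]
    trailing = np.arange(num_train_timesteps, 0, -num_train_timesteps / num_inference_steps).round() - 1

    print(linspace)  # [999. 666. 333.   0.]
    print(leading)   # [750. 500. 250.   0.]
    print(trailing)  # [999. 749. 499. 249.]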
| 141 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
def test_kandinsky_img2img( self ):
"""simple docstring"""
device ='cpu'
components =self.get_dummy_components()
pipe =self.pipeline_class(**components )
pipe =pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output =pipe(**self.get_dummy_inputs(device ) )
image =output.images
image_from_tuple =pipe(
**self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice =image[0, -3:, -3:, -1]
image_from_tuple_slice =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
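# The integration test below downloads the real Kandinsky 2.2 prior and decoder
# checkpoints, which is why it is gated behind the @slow and @require_torch_gpu
# decorators rather than running with the fast CPU tests above.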
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests( unittest.TestCase ):
def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img( self ):
"""simple docstring"""
expected_image =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
init_image =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
prompt ='A red cartoon frog, 4k'
pipe_prior =KandinskyV22PriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(torch_device )
pipeline =KandinskyV22Img2ImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.float16 )
pipeline =pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator =torch.Generator(device='cpu' ).manual_seed(0 )
image_emb , zero_image_emb =pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
output =pipeline(
image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
image =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
| 141 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_lengths = use_input_lengths
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.causal = causal
self.asm = asm
self.n_langs = n_langs
self.vocab_size = vocab_size
self.n_special = n_special
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.summary_type = summary_type
self.use_proj = use_proj
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def get_config( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
model = FlaubertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
result = model(input_ids , langs=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
model = FlaubertWithLMHeadModel(config )
model.to(torch_device )
model.eval()
result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_flaubert_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
model = FlaubertForQuestionAnsweringSimple(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
model = FlaubertForQuestionAnswering(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result_with_labels = model(
input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
result_with_labels = model(
input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
(total_loss,) = result_with_labels.to_tuple()
result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
(total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
model = FlaubertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result = model(input_ids , labels=sequence_labels )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def create_and_check_flaubert_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
config.num_labels = self.num_labels
model = FlaubertForTokenClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_flaubert_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
config.num_choices = self.num_choices
model = FlaubertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip(
self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = FlaubertModelTester(self )
self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_flaubert_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
def test_flaubert_lm_head( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
def test_flaubert_simple_qa( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
def test_flaubert_qa( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
def test_flaubert_sequence_classif( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
def test_flaubert_token_classif( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
def test_flaubert_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@slow
@require_torch_gpu
def test_torchscript_device_change( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config )
inputs_dict = self._prepare_for_class(inputs_dict , model_class )
traced_model = torch.jit.trace(
model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_no_head_absolute_embedding( self ):
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
output = model(input_ids )[0]
expected_shape = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 354 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22Img2ImgPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
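# enable_full_determinism() seeds every RNG source (Python, NumPy, torch/CUDA) and
# requests deterministic kernels, which is what keeps the hard-coded expected
# slices in the fast tests below stable across runs.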
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = KandinskyV22Img2ImgPipeline
params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
return 32
@property
def time_input_dim( self ):
return 32
@property
def block_out_channels_a( self ):
return self.time_input_dim
@property
def time_embed_dim( self ):
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
return 100
@property
def dummy_unet( self ):
torch.manual_seed(0 )
model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
model = UNet2DConditionModel(**model_kwargs )
return model
@property
def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self ):
torch.manual_seed(0 )
model = VQModel(**self.dummy_movq_kwargs )
return model
def get_dummy_components( self ):
unet = self.dummy_unet
movq = self.dummy_movq
scheduler_kwargs = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
scheduler = DDIMScheduler(**scheduler_kwargs )
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
device )
# create init_image
image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def test_kandinsky_img2img( self ):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img( self ):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
prompt = "A red cartoon frog, 4k"
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
pipe_prior.to(torch_device )
pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device="cpu" ).manual_seed(0 )
image_emb , zero_image_emb = pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
output = pipeline(
image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
| 141 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = ProphetNetTokenizer
test_rust_tokenizer = False
def setUp( self ) -> Dict:
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def get_input_output_texts( self , tokenizer ) -> Optional[Any]:
input_text = '''UNwant\u00E9d,running'''
output_text = '''unwanted, running'''
return input_text, output_text
def test_full_tokenizer( self ) -> Optional[Any]:
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def test_chinese( self ) -> Dict:
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def test_basic_tokenizer_lower( self ) -> Union[str, Any]:
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def test_basic_tokenizer_lower_strip_accents_false( self ) -> List[str]:
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def test_basic_tokenizer_lower_strip_accents_true( self ) -> Optional[int]:
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def test_basic_tokenizer_lower_strip_accents_default( self ) -> List[Any]:
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def test_basic_tokenizer_no_lower( self ) -> Optional[Any]:
tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def test_basic_tokenizer_no_lower_strip_accents_false( self ) -> List[str]:
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def test_basic_tokenizer_no_lower_strip_accents_true( self ) -> List[Any]:
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def test_basic_tokenizer_respects_never_split_tokens( self ) -> Tuple:
tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def test_wordpiece_tokenizer( self ) -> List[Any]:
vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
vocab = {}
for i, token in enumerate(vocab_tokens ):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
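# WordPiece is greedy longest-match-first and segments word by word: in
# "unwantedX" no suffix piece matches the trailing "X", so the whole word
# collapses to [UNK] rather than yielding partial pieces.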
@require_torch
def test_prepare_batch( self ) -> List[str]:
tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
self.assertIsInstance(batch , BatchEncoding )
result = list(batch.input_ids.numpy()[0] )
self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def test_is_whitespace( self ) -> List[Any]:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def test_is_control( self ) -> str:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def test_is_punctuation( self ) -> Optional[int]:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def test_sequence_builders( self ) -> Any:
tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 299 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin ):
"""simple docstring"""
feature_extractor_class ="Wav2Vec2FeatureExtractor"
tokenizer_class ="AutoTokenizer"
def __init__( self , feature_extractor , tokenizer ) -> Dict:
super().__init__(feature_extractor , tokenizer )
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
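# `current_processor` powers the deprecated `as_target_processor` context manager at
# the bottom of this class: inside the context it is swapped to the tokenizer so a
# bare call encodes labels instead of extracting audio features.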
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> List[str]:
try:
return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , FutureWarning , )
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
def __call__( self , *args , **kwargs ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
audio = kwargs.pop('''raw_speech''' )
else:
audio = kwargs.pop('''audio''' , None )
sampling_rate = kwargs.pop('''sampling_rate''' , None )
text = kwargs.pop('''text''' , None )
if len(args ) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
if text is not None:
encodings = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs["labels"] = encodings['''input_ids''']
return inputs
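# Hypothetical usage sketch (the checkpoint name is an assumption, not taken from
# this file): one call handles audio, text, or both; when both are passed the
# tokenized text is attached to the feature-extractor output as `labels`:
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="hello world")
#   batch["input_values"], batch["labels"]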
def pad( self , *args , **kwargs ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*args , **kwargs )
input_features = kwargs.pop('''input_features''' , None )
labels = kwargs.pop('''labels''' , None )
if len(args ) > 0:
input_features = args[0]
args = args[1:]
if input_features is not None:
input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
if labels is not None:
labels = self.tokenizer.pad(labels , **kwargs )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
input_features["labels"] = labels['''input_ids''']
return input_features
def batch_decode( self , *args , **kwargs ) -> Any:
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ) -> Optional[Any]:
return self.tokenizer.decode(*args , **kwargs )
@contextmanager
def as_target_processor( self ) -> Optional[int]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
| 299 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.mask_ratio = mask_ratio
self.scope = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
self.num_patches = (image_size // patch_size) ** 2
self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
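# Worked example with the defaults above: image_size=30, patch_size=2 gives
# (30 // 2) ** 2 = 225 patches; with mask_ratio=0.6 the encoder keeps
# ceil(0.4 * (225 + 1)) = 91 visible tokens (CLS token included).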
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def create_and_check_model( self , config , pixel_values , labels ):
model = TFViTMAEModel(config=config )
result = model(pixel_values , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_pretraining( self , config , pixel_values , labels ):
model = TFViTMAEForPreTraining(config )
result = model(pixel_values , training=False )
# expected sequence length = num_patches
num_patches = (self.image_size // self.patch_size) ** 2
expected_num_channels = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
config.num_channels = 1
model = TFViTMAEForPreTraining(config )
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values , training=False )
expected_num_channels = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, pixel_values, labels) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
pipeline_model_mapping = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
test_pruning = False
test_onnx = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
self.model_tester = TFViTMAEModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def test_inputs_embeds( self ):
pass
def test_model_common_attributes( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
def test_forward_signature( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_pretraining( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def test_keyword_and_dict_args( self ):
# make the mask reproducible
np.random.seed(2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
model = model_class(config )
inputs = self._prepare_for_class(inputs_dict , model_class )
outputs_dict = model(inputs , noise=noise )
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
outputs_keywords = model(**inputs_keywords , noise=noise )
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
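# The tests below all pass an explicit `noise` tensor because ViTMAE otherwise
# samples a fresh random mask on every forward pass, which would make two runs of
# the same model incomparable.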
def test_numpy_arrays_inputs( self ):
# make the mask reproducible
np.random.seed(2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(inputs_dict ):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v ):
inputs_np_dict[k] = v.numpy()
else:
inputs_np_dict[k] = np.array(v )
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config )
inputs = self._prepare_for_class(inputs_dict , model_class )
inputs_np = prepare_numpy_arrays(inputs )
output_for_dict_input = model(inputs_np , noise=noise )
output_for_kw_input = model(**inputs , noise=noise )
self.assert_outputs_same(output_for_dict_input , output_for_kw_input )
def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ):
# make masks reproducible
np.random.seed(2 )
num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
tf_noise = tf.constant(noise )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
tf_inputs_dict["noise"] = tf_noise
super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
def test_keras_save_load( self ):
# make mask reproducible
np.random.seed(2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(module )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(module , module_member_name ),)
if isinstance(module_member , type )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member , "_keras_serializable" , False )
}
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
noise = tf.convert_to_tensor(noise )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
main_layer = main_layer_class(config )
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
outputs = model(inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname , "keras_model.h5" )
model.save(filepath )
model = tf.keras.models.load_model(
filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(model , tf.keras.Model )
after_outputs = model(inputs_dict )
self.assert_outputs_same(after_outputs , outputs )
@slow
def test_save_load( self ):
# make mask reproducible
np.random.seed(2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
model = model_class(config )
inputs = self._prepare_for_class(inputs_dict , model_class )
outputs = model(inputs , noise=noise )
if model_class.__name__ == "TFViTMAEModel":
out_2 = outputs.last_hidden_state.numpy()
out_2[np.isnan(out_2 )] = 0
else:
out_2 = outputs.logits.numpy()
out_2[np.isnan(out_2 )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname , saved_model=False )
model = model_class.from_pretrained(tmpdirname )
after_outputs = model(inputs , noise=noise )
if model_class.__name__ == "TFViTMAEModel":
out_1 = after_outputs["last_hidden_state"].numpy()
out_1[np.isnan(out_1 )] = 0
else:
out_1 = after_outputs["logits"].numpy()
out_1[np.isnan(out_1 )] = 0
max_diff = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(max_diff , 1e-5 )
def test_save_load_config( self ):
# make mask reproducible
np.random.seed(2 )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
num_patches = int((config.image_size // config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
model = model_class(config )
inputs = self._prepare_for_class(inputs_dict , model_class )
outputs = model(inputs , noise=noise )
model_config = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(model_config )
new_model = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
new_model = model_class.from_config(model.config )
_ = new_model(inputs )  # Build model
new_model.set_weights(model.get_weights() )
after_outputs = new_model(inputs , noise=noise )
self.assert_outputs_same(after_outputs , outputs )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def test_determinism( self ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def test_model_outputs_equivalence( self ):
pass
@slow
def test_model_from_pretrained( self ):
model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(model )
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def test_inference_for_pretraining( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
noise = np.random.uniform(size=(1, num_patches) )
# forward pass
outputs = model(**inputs , noise=noise )
# verify the logits
expected_shape = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = tf.convert_to_tensor(
[[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
| 305 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase ):
"""simple docstring"""
def setUp( self ):
self.tmpdirname = tempfile.mkdtemp()
image_processor = BlipImageProcessor()
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
processor = Blip2Processor(image_processor , tokenizer )
processor.save_pretrained(self.tmpdirname )
def get_tokenizer( self , **kwargs ):
return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
def get_image_processor( self , **kwargs ):
return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
def tearDown( self ):
shutil.rmtree(self.tmpdirname )
def prepare_image_inputs( self ):
image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
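# Every test below rebuilds its components via get_tokenizer()/get_image_processor(),
# i.e. by loading them back from the temp dir written in setUp, so the
# save_pretrained/from_pretrained round-trip is exercised throughout.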
def test_save_load_pretrained_additional_features( self ):
processor = Blip2Processor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
processor = Blip2Processor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , BlipImageProcessor )
def test_image_processor( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input , return_tensors="np" )
input_processor = processor(images=image_input , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def test_tokenizer( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str , return_token_type_ids=False )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_tokenizer_decode( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
def test_model_input_names( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
field: Optional[str] = None
use_threads: bool = True # deprecated
block_size: Optional[int] = None # deprecated
chunksize: int = 10 << 20 # 10MB
newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
BUILDER_CONFIG_CLASS = JsonConfig
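# `Json` above is the packaged builder behind load_dataset("json", ...); keyword
# arguments such as field=... or chunksize=... are stored on the JsonConfig.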
def _info( self ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self , dl_manager ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
return splits
def _cast_table( self , pa_table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
type = self.config.features.arrow_schema.field(column_name ).type
pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
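# Example: with features={"text": Value("string"), "label": ClassLabel(...)}, a
# JSON file missing the "label" column gets it appended as all-null before the cast.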
def _generate_tables( self , files ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
dataset = json.load(f )
# We keep only the field we are interested in
dataset = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(dataset , (list, tuple) ):
keys = set().union(*[row.keys() for row in dataset] )
mapping = {col: [row.get(col ) for row in dataset] for col in keys}
else:
mapping = dataset
pa_table = pa.Table.from_pydict(mapping )
yield file_idx, self._cast_table(pa_table )
# If the file has one json object per line
else:
with open(file , '''rb''' ) as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
block_size = max(self.config.chunksize // 32 , 16 << 10 )
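# With the default 10 MiB chunksize this works out to 10 * 2**20 // 32 = 327_680
# bytes (320 KiB) per Arrow read block.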
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
batch = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('''utf-8''' )
try:
while True:
try:
pa_table = paj.read_json(
io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e , pa.ArrowInvalid )
and "straddling" not in str(e )
or block_size > len(batch )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowerCAmelCase : Any = json.load(__snake_case )
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(__snake_case )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__snake_case , __snake_case ): # list is the only sequence type supported in JSON
try:
__lowerCAmelCase : List[Any] = set().union(*[row.keys() for row in dataset] )
__lowerCAmelCase : Dict = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
__lowerCAmelCase : str = pa.Table.from_pydict(__snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(__snake_case )}: {e}""" )
raise ValueError(f"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__snake_case )
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(__snake_case )}: {e}""" )
                            raise ValueError(
                                f"""Not able to read records in the JSON file at {file}. """
                                f"""You should probably indicate the field of the JSON file containing your records. """
                                f"""This JSON file contains the following fields: {str(list(dataset.keys() ) )}. """
                                f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__snake_case )
batch_idx += 1
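# A minimal standalone sketch of the chunked-read strategy implemented above:
# read `chunksize` bytes at a time, complete the current line, and retry
# `paj.read_json` with a doubled block_size whenever PyArrow reports a record
# straddling the block boundary. The function name and default sizes below are
# illustrative, not part of the loader itself.
import io

import pyarrow as pa
import pyarrow.json as paj


def read_json_in_batches(path, chunksize=10 << 20):
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the current line
            block_size = max(chunksize // 32, 16 << 10)
            while True:
                try:
                    yield paj.read_json(
                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                    )
                    break
                except pa.ArrowInvalid as e:
                    if "straddling" not in str(e) or block_size > len(batch):
                        raise
                    block_size *= 2  # block too small for a record; retry with twice the size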
| 275 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCAmelCase ( snake_case_ ):
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__snake_case , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__snake_case , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(__snake_case , """num_encoder_blocks""" ) )
class UpperCAmelCase :
def __init__( self : Optional[int] , __snake_case : str , __snake_case : Dict=13 , __snake_case : str=64 , __snake_case : Dict=3 , __snake_case : Dict=4 , __snake_case : Tuple=[2, 2, 2, 2] , __snake_case : int=[8, 4, 2, 1] , __snake_case : List[str]=[16, 32, 64, 1_28] , __snake_case : Optional[Any]=[1, 4, 8, 16] , __snake_case : Dict=[1, 2, 4, 8] , __snake_case : Optional[Any]=True , __snake_case : List[str]=True , __snake_case : int="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : Any=0.1 , __snake_case : Tuple=0.02 , __snake_case : Union[str, Any]=3 , __snake_case : Tuple=None , ) -> List[str]:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_encoder_blocks
_lowerCAmelCase = sr_ratios
_lowerCAmelCase = depths
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = downsampling_rates
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = scope
def lowercase__ ( self : int ) -> Union[str, Any]:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[Any] ) -> List[str]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> Tuple:
_lowerCAmelCase = SegformerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = _lowerCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowercase__ ( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> List[str]:
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = SegformerForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
_lowerCAmelCase = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
_lowerCAmelCase = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowercase__ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict ) -> List[str]:
_lowerCAmelCase = 1
_lowerCAmelCase = SegformerForSemanticSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
_lowerCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__snake_case )
_lowerCAmelCase = model(__snake_case , labels=__snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def lowercase__ ( self : Optional[int] ) -> int:
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
_lowercase: Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowercase: Tuple = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase: Tuple = True
_lowercase: Union[str, Any] = False
_lowercase: Dict = False
_lowercase: Optional[Any] = False
def lowercase__ ( self : Tuple ) -> Any:
_lowerCAmelCase = SegformerModelTester(self )
_lowerCAmelCase = SegformerConfigTester(self , config_class=__snake_case )
def lowercase__ ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def lowercase__ ( self : int ) -> Union[str, Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase__ ( self : Dict ) -> int:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__snake_case )
def lowercase__ ( self : Dict ) -> Dict:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__snake_case )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> Union[str, Any]:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def lowercase__ ( self : Optional[int] ) -> int:
pass
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__snake_case )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def lowercase__ ( self : Tuple ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.attentions
_lowerCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(__snake_case ) , __snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first attentions (first block, first layer)
_lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
_lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_lowerCAmelCase = (self.model_tester.image_size // 32) ** 2
_lowerCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_lowerCAmelCase = len(__snake_case )
        # Check that attention outputs always come last and that their order is unchanged
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first attentions (first block, first layer)
_lowerCAmelCase = (self.model_tester.image_size // 4) ** 2
_lowerCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowercase__ ( self : int ) -> List[str]:
def check_hidden_states_output(__snake_case : str , __snake_case : Tuple , __snake_case : Optional[int] ):
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(__snake_case ) , __snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def lowercase__ ( self : Optional[Any] ) -> Any:
if not self.model_tester.is_training:
return
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__snake_case ):
continue
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.train()
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
_lowerCAmelCase = model(**__snake_case ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Tuple ) -> Dict:
pass
@slow
def lowercase__ ( self : str ) -> Optional[int]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = SegformerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def UpperCamelCase__ ( ):
"""simple docstring"""
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self : Union[str, Any] ) -> Any:
# only resize + normalize
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
_lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__snake_case )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , __snake_case )
_lowerCAmelCase = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def lowercase__ ( self : Optional[Any] ) -> Any:
# only resize + normalize
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
_lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__snake_case )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , __snake_case )
_lowerCAmelCase = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __snake_case , atol=1E-1 ) )
@slow
def lowercase__ ( self : Any ) -> str:
# only resize + normalize
_lowerCAmelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
_lowerCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__snake_case )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
_lowerCAmelCase = encoded_inputs.pixel_values.to(__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(__snake_case )
_lowerCAmelCase = outputs.logits.detach().cpu()
_lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_00, 3_00)] )
_lowerCAmelCase = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __snake_case )
_lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
_lowerCAmelCase = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , __snake_case )
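# A short inference sketch mirroring the integration tests above, using the same
# public checkpoint and fixture image; it is illustrative and not part of the
# test suite, so it is guarded to run only when executed directly.
if __name__ == "__main__":
    import torch
    from PIL import Image

    from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

    processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # logits come out at 1/4 of the input resolution; the processor upsamples them back
    segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
    print(segmentation[0].shape)  # per-pixel class ids at the original image size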
| 70 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_UpperCAmelCase : Any = None
try:
import msvcrt
except ImportError:
_UpperCAmelCase : Any = None
try:
import fcntl
except ImportError:
_UpperCAmelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_UpperCAmelCase : str = OSError
# Data
# ------------------------------------------------
_UpperCAmelCase : Any = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
_UpperCAmelCase : int = """3.0.12"""
_UpperCAmelCase : Optional[int] = None
def __magic_name__( ):
global _logger
    _logger = _logger or logging.getLogger(__name__)
return _logger
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase ):
__lowerCAmelCase = lock_file
return None
def __str__(self ):
__lowerCAmelCase = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class a__ :
"""simple docstring"""
def __init__(self , __lowercase ):
__lowerCAmelCase = lock
return None
def __enter__(self ):
return self.lock
def __exit__(self , __lowercase , __lowercase , __lowercase ):
self.lock.release()
return None
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=-1 , __lowercase=None ):
__lowerCAmelCase = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__lowerCAmelCase = self.hash_filename_if_too_long(__lowercase , __lowercase )
# The path to the lock file.
__lowerCAmelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
        # This file descriptor is not None only if the object currently
        # holds the lock.
__lowerCAmelCase = None
# The default timeout value.
__lowerCAmelCase = timeout
# We use this lock primarily for the lock counter.
__lowerCAmelCase = threading.Lock()
        # The lock counter implements the nested locking mechanism. Whenever
        # the lock is acquired the counter is increased, and the lock is only
        # released when this counter reaches 0 again.
__lowerCAmelCase = 0
return None
@property
def _snake_case (self ):
return self._lock_file
@property
def _snake_case (self ):
return self._timeout
@timeout.setter
def _snake_case (self , __lowercase ):
__lowerCAmelCase = float(__lowercase )
return None
def _snake_case (self ):
raise NotImplementedError()
def _snake_case (self ):
raise NotImplementedError()
@property
def _snake_case (self ):
return self._lock_file_fd is not None
def _snake_case (self , __lowercase=None , __lowercase=0.0_5 ):
        # Use the default timeout if none is provided.
if timeout is None:
__lowerCAmelCase = self.timeout
        # Increment the counter right at the beginning; we can still undo it
        # if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowerCAmelCase = id(self )
__lowerCAmelCase = self._lock_file
__lowerCAmelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(__lowercase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowerCAmelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _snake_case (self , __lowercase=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowerCAmelCase = id(self )
__lowerCAmelCase = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__lowerCAmelCase = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
self.acquire()
return self
def __exit__(self , __lowercase , __lowercase , __lowercase ):
self.release()
return None
def __del__(self ):
self.release(force=__lowercase )
return None
def _snake_case (self , __lowercase , __lowercase ):
__lowerCAmelCase = os.path.basename(__lowercase )
if len(__lowercase ) > max_length and max_length > 0:
__lowerCAmelCase = os.path.dirname(__lowercase )
__lowerCAmelCase = str(hash(__lowercase ) )
__lowerCAmelCase = filename[: max_length - len(__lowercase ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(__lowercase , __lowercase )
else:
return path
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=-1 , __lowercase=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__lowercase , timeout=__lowercase , max_filename_length=__lowercase )
__lowerCAmelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def _snake_case (self ):
__lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowerCAmelCase = os.open(self._lock_file , __lowercase )
except OSError:
pass
else:
try:
msvcrt.locking(__lowercase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__lowercase )
else:
__lowerCAmelCase = fd
return None
def _snake_case (self ):
__lowerCAmelCase = self._lock_file_fd
__lowerCAmelCase = None
msvcrt.locking(__lowercase , msvcrt.LK_UNLCK , 1 )
os.close(__lowercase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=-1 , __lowercase=None ):
__lowerCAmelCase = os.statvfs(os.path.dirname(__lowercase ) ).f_namemax
super().__init__(__lowercase , timeout=__lowercase , max_filename_length=__lowercase )
def _snake_case (self ):
__lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowerCAmelCase = os.open(self._lock_file , __lowercase )
try:
fcntl.flock(__lowercase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__lowercase )
else:
__lowerCAmelCase = fd
return None
def _snake_case (self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__lowerCAmelCase = self._lock_file_fd
__lowerCAmelCase = None
fcntl.flock(__lowercase , fcntl.LOCK_UN )
os.close(__lowercase )
return None
class a__ ( __A ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowerCAmelCase = os.open(self._lock_file , __lowercase )
except OSError:
pass
else:
__lowerCAmelCase = fd
return None
def _snake_case (self ):
os.close(self._lock_file_fd )
__lowerCAmelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_UpperCAmelCase : Any = None
if msvcrt:
_UpperCAmelCase : Any = WindowsFileLock
elif fcntl:
_UpperCAmelCase : List[Any] = UnixFileLock
else:
_UpperCAmelCase : List[Any] = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
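# A brief usage sketch for the module above, written against the public names
# its `__all__` advertises (`FileLock`, `Timeout`); the lock path is
# illustrative, and the block only runs when the module is executed directly.
if __name__ == "__main__":
    lock = FileLock("/tmp/example.txt.lock", timeout=1)
    try:
        with lock:  # acquire() on __enter__, release() on __exit__
            with lock:  # re-entrant: the nested acquire only bumps the lock counter
                print("doing work while holding the lock")
    except Timeout:
        print("could not acquire the lock within 1 second")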
| 371 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : str = 'roberta'
def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
class a__ ( __A ):
"""simple docstring"""
@property
def _snake_case (self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
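# A small usage sketch for the configuration above; `RobertaConfig` and
# `RobertaModel` are the public transformers names this module corresponds to,
# and the argument values are illustrative (a randomly initialized toy model,
# no checkpoint download).
if __name__ == "__main__":
    from transformers import RobertaConfig, RobertaModel

    config = RobertaConfig(num_hidden_layers=4, hidden_size=256, num_attention_heads=4, intermediate_size=1024)
    model = RobertaModel(config)
    print(config.position_embedding_type, model.config.hidden_size)  # absolute 256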
| 9 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A: str = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_SCREAMING_SNAKE_CASE , speech_processor=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Optional[Any]:
'''simple docstring'''
if slice_size == "auto":
UpperCAmelCase : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Any = self.speech_processor.feature_extractor(
_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , sampling_rate=_SCREAMING_SNAKE_CASE ).input_features.to(self.device )
UpperCAmelCase : Union[str, Any] = self.speech_model.generate(_SCREAMING_SNAKE_CASE , max_length=480000 )
UpperCAmelCase : Union[str, Any] = self.speech_processor.tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , normalize=_SCREAMING_SNAKE_CASE )[
0
]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Tuple = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : int = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_SCREAMING_SNAKE_CASE )}." )
# get prompt text embeddings
UpperCAmelCase : Union[str, Any] = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = text_embeddings.shape
UpperCAmelCase : Optional[Any] = text_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
UpperCAmelCase : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : List[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : str = [""""""] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
F" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : str = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
UpperCAmelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : Dict = uncond_embeddings.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
UpperCAmelCase : int = uncond_embeddings.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
UpperCAmelCase : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : List[str] = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device="""cpu""" , dtype=_SCREAMING_SNAKE_CASE ).to(
self.device )
else:
UpperCAmelCase : Optional[int] = torch.randn(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCAmelCase : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : Optional[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[int] = {}
if accepts_eta:
UpperCAmelCase : Dict = eta
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : Dict = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# predict the noise residual
UpperCAmelCase : Tuple = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Optional[int] = noise_pred.chunk(2 )
UpperCAmelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Tuple = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = 1 / 0.1_8215 * latents
UpperCAmelCase : Optional[Any] = self.vae.decode(_SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : List[Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_SCREAMING_SNAKE_CASE , nsfw_content_detected=_SCREAMING_SNAKE_CASE )
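# A hedged end-to-end sketch of how a pipeline like the one above is typically
# assembled: Whisper transcribes the audio into the prompt, then Stable
# Diffusion renders it. The `custom_pipeline` name and the checkpoints follow
# the diffusers community "speech to image" example this class resembles and
# are assumptions here; the audio array is a placeholder.
if __name__ == "__main__":
    import numpy as np

    device = "cuda" if torch.cuda.is_available() else "cpu"
    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="speech_to_image_diffusion",
        speech_model=speech_model,
        speech_processor=speech_processor,
    ).to(device)
    audio = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of silence at 16 kHz
    output = pipe(audio, sampling_rate=16_000, num_inference_steps=25)
    output.images[0].save("speech_to_image.png")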
| 109 |
"""simple docstring"""
A: int = range(2, 2_0 + 1)
A: Any = [1_0**k for k in range(ks[-1] + 1)]
A: dict[int, dict[int, list[list[int]]]] = {}
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int ):
UpperCAmelCase : List[str] = sum(a_i[j] for j in range(UpperCamelCase , len(UpperCamelCase ) ) )
UpperCAmelCase : str = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase ) , UpperCamelCase ) ) )
UpperCAmelCase , UpperCAmelCase : str = 0, 0
UpperCAmelCase : Optional[Any] = n - i
UpperCAmelCase : Optional[int] = memo.get(UpperCamelCase )
if sub_memo is not None:
UpperCAmelCase : str = sub_memo.get(UpperCamelCase )
if jumps is not None and len(UpperCamelCase ) > 0:
# find and make the largest jump without going over
UpperCAmelCase : Tuple = -1
for _k in range(len(UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase : int = _k
break
if max_jump >= 0:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase : List[str] = diff + c
for j in range(min(UpperCamelCase , len(UpperCamelCase ) ) ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = divmod(UpperCamelCase , 10 )
if new_c > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
UpperCAmelCase : int = []
else:
UpperCAmelCase : List[str] = {c: []}
UpperCAmelCase : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase , UpperCAmelCase : List[str] = next_term(UpperCamelCase , k - 1 , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase , UpperCAmelCase : int = compute(UpperCamelCase , UpperCamelCase , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
UpperCAmelCase : Dict = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase : str = 0
while j < len(UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this (digitsum(b), c) pair
sub_memo[c].insert(UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def _snake_case ( UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ):
if i >= n:
return 0, i
if k > len(UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase : List[str] = i
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = 0, 0, 0
for j in range(len(UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase : Optional[int] = ds_c + ds_b
diff += addend
UpperCAmelCase : str = 0
for j in range(UpperCamelCase ):
UpperCAmelCase : Any = a_i[j] + addend
UpperCAmelCase , UpperCAmelCase : Any = divmod(UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return diff, i - start_i
def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] ):
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
UpperCAmelCase : Optional[int] = digits[j] + addend
if s >= 10:
UpperCAmelCase , UpperCAmelCase : int = divmod(UpperCamelCase , 10 )
UpperCAmelCase : str = addend // 10 + quotient
else:
UpperCAmelCase : Any = s
UpperCAmelCase : Union[str, Any] = addend // 10
if addend == 0:
break
while addend > 0:
UpperCAmelCase , UpperCAmelCase : Any = divmod(UpperCamelCase , 10 )
digits.append(UpperCamelCase )
def _snake_case ( UpperCamelCase : int = 10**15 ):
UpperCAmelCase : Dict = [1]
UpperCAmelCase : int = 1
UpperCAmelCase : Tuple = 0
while True:
UpperCAmelCase , UpperCAmelCase : Tuple = next_term(UpperCamelCase , 20 , i + dn , UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase : Any = 0
for j in range(len(UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__lowerCAmelCase = None
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
__lowerCAmelCase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Dict = ['input_ids', 'attention_mask']
lowerCAmelCase : List[str] = MBartTokenizer
lowerCAmelCase : List[int] = []
lowerCAmelCase : List[int] = []
def __init__( self : Union[str, Any] ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Any=None ,_UpperCAmelCase : Optional[int]="<s>" ,_UpperCAmelCase : List[Any]="</s>" ,_UpperCAmelCase : int="</s>" ,_UpperCAmelCase : Union[str, Any]="<s>" ,_UpperCAmelCase : List[str]="<unk>" ,_UpperCAmelCase : str="<pad>" ,_UpperCAmelCase : int="<mask>" ,_UpperCAmelCase : int=None ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,**_UpperCAmelCase : int ,):
        # Mask token behaves like a normal word, i.e. it includes the space before it
_a : List[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=_UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,src_lang=_UpperCAmelCase ,tgt_lang=_UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ,)
_a : str = vocab_file
_a : Dict = False if not self.vocab_file else True
_a : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_a : Dict = {
lang_code: self.convert_tokens_to_ids(_UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_a : Dict = src_lang if src_lang is not None else 'en_XX'
_a : Any = self.convert_tokens_to_ids(self._src_lang )
_a : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowercase ( self : Tuple ):
return self._src_lang
@src_lang.setter
def __lowercase ( self : Any ,_UpperCAmelCase : str ):
_a : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self : Tuple ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
_a : Any = [self.sep_token_id]
_a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] ,**_UpperCAmelCase : List[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_a : Tuple = src_lang
_a : Dict = self(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Optional[int] = self.convert_tokens_to_ids(_UpperCAmelCase )
_a : List[str] = tgt_lang_id
return inputs
def __lowercase ( self : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str = "en_XX" ,_UpperCAmelCase : Optional[List[str]] = None ,_UpperCAmelCase : str = "ro_RO" ,**_UpperCAmelCase : List[Any] ,):
_a : Optional[int] = src_lang
_a : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Any ):
return self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self : Dict ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[str] ):
_a : Union[str, Any] = self.convert_tokens_to_ids(_UpperCAmelCase )
_a : Any = []
_a : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
_a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_a : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
_a : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ):
_a : List[Any] = self.convert_tokens_to_ids(_UpperCAmelCase )
_a : List[str] = []
_a : int = [self.eos_token_id, self.cur_lang_code]
_a : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens )
_a : Any = self.convert_ids_to_tokens(self.suffix_tokens )
_a : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_a : Dict = os.path.join(
_UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file ,_UpperCAmelCase )
return (out_vocab_file,)
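# A short translation-style usage sketch for the tokenizer above, following the
# documented MBart workflow; the checkpoint and sentence are illustrative, and
# the block only runs when the module is executed directly.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # per set_src_lang_special_tokens above, source sequences end with </s>
    # followed by the source language code
    print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][-2:].tolist()))  # ['</s>', 'en_XX']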
| 107 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __magic_name__ :
def __init__( self : Dict ,_UpperCAmelCase : Any ):
_a : Any = data
_a : Node | None = None
class __magic_name__ :
def __init__( self : Any ):
_a : int = None
_a : Optional[int] = None
def __iter__( self : Optional[int] ):
_a : List[Any] = self.head
while self.head:
yield node.data
_a : str = node.next
if node == self.head:
break
def __len__( self : Any ):
return sum(1 for _ in self )
def __repr__( self : int ):
return "->".join(str(_UpperCAmelCase ) for item in iter(self ) )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_a : List[str] = Node(_UpperCAmelCase )
if self.head is None:
            _a : Tuple = new_node  # first node points to itself
_a : int = new_node
elif index == 0: # insert at head
_a : Any = self.head
_a : Tuple = new_node
else:
_a : Any = self.head
for _ in range(index - 1 ):
_a : int = temp.next
_a : Optional[int] = temp.next
_a : int = new_node
if index == len(self ) - 1: # insert at tail
_a : Optional[int] = new_node
def __lowercase ( self : List[Any] ):
return self.delete_nth(0 )
def __lowercase ( self : Dict ):
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_a : Optional[int] = self.head
if self.head == self.tail: # just one node
_a : Optional[int] = None
elif index == 0: # delete head node
_a : Dict = self.tail.next.next
_a : Dict = self.head.next
else:
_a : List[Any] = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : Optional[int] = temp.next
_a : List[str] = temp.next.next
if index == len(self ) - 1: # delete at tail
_a : int = temp
return delete_node.data
def __lowercase ( self : int ):
return len(self ) == 0
def __lowerCamelCase ( ) -> None:
_a : int = CircularLinkedList()
assert len(lowerCAmelCase_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowerCAmelCase_ ) == i
circular_linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
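# A tiny demo using the same public method names the test function above
# exercises; guarded so it only runs when the module is executed directly.
if __name__ == "__main__":
    demo = CircularLinkedList()
    for value in (10, 20, 30):
        demo.insert_tail(value)
    demo.insert_head(5)
    print(demo)  # 5->10->20->30
    print(demo.delete_front())  # 5
    print(len(demo))  # 3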
| 107 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Optional[int] = {"vocab_file": "spiece.model"}
lowercase__ : int = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
lowercase__ : Any = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
lowercase__ : Dict = "▁"
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
__UpperCamelCase = (
AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ , normalized=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else mask_token
)
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE_ , remove_space=SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase = do_lower_case
__UpperCamelCase = remove_space
__UpperCamelCase = keep_accents
__UpperCamelCase = vocab_file
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> List[str]:
'''simple docstring'''
return len(self.sp_model )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self )-> List[Any]:
'''simple docstring'''
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__UpperCamelCase = {}
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
if self.remove_space:
__UpperCamelCase = ''' '''.join(inputs.strip().split() )
else:
__UpperCamelCase = inputs
__UpperCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__UpperCamelCase = unicodedata.normalize('''NFKD''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE_ )] )
if self.do_lower_case:
__UpperCamelCase = outputs.lower()
return outputs
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
__UpperCamelCase = self.preprocess_text(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE_ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase = cur_pieces[1:]
else:
__UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE_ )
else:
new_pieces.append(SCREAMING_SNAKE_CASE_ )
return new_pieces
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = ''''''
__UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
__UpperCamelCase = True
__UpperCamelCase = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> List[int]:
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> List[int]:
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__UpperCamelCase = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
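
# The class above is recognizably transformers' AlbertTokenizer with its
# identifiers mangled (several signatures repeat the same parameter name and
# would not even parse as written). A minimal usage sketch against the
# upstream class, assuming hub access to the `albert-base-v2` checkpoint:
#
#     from transformers import AlbertTokenizer
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     ids = tokenizer("SentencePiece keeps rare words intact.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))  # subword pieces marked with "▁"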
| 328 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    '''simple docstring'''
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    '''simple docstring'''
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 328 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCamelCase ( self : Any):
'''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
self.resolver.convert_models(['heb-eng'])
@slow
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
        content, mmeta = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 48 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 48 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
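
# The reader above is what backs `datasets.Dataset.from_generator`; a minimal
# round trip through the public API (assumes `datasets` is installed):
if __name__ == "__main__":
    from datasets import Dataset

    def gen():
        for i in range(3):
            yield {"idx": i, "text": f"example {i}"}

    print(Dataset.from_generator(gen)[0])  # {'idx': 0, 'text': 'example 0'}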
| 150 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """simple docstring"""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
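
# Classic checks for the fixed implementation above:
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6  # subarray [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0
    assert max_product_subarray([-2, 3, -4]) == 24  # the two negatives pair up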
| 150 | 1 |
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""

    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
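
# Non-interactive sketch of the same routine (an alternative to the stdin
# loop above), using the restored names from the fix:
#
#     graph = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (2, 3, 1)]:
#         graph[u].append([v, w])
#         graph[v].append([u, w])
#     print(prisms_algorithm(graph))  # [(0, 1), (1, 2), (2, 3)]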
| 367 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
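
# Minimal usage sketch for the restored tool; the first call downloads the
# bart-large-mnli checkpoint, so this is network- and compute-heavy:
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This movie was really great.", ["positive", "negative"]))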
| 75 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = {}
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1 ):
"""simple docstring"""
if self.graph.get(UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase = [[w, v]]
if not self.graph.get(UpperCAmelCase ):
_UpperCAmelCase = []
def UpperCamelCase ( self ):
"""simple docstring"""
return list(self.graph )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
if self.graph.get(UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase=-2 , UpperCAmelCase=-1 ):
"""simple docstring"""
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(UpperCAmelCase ) == 0:
return visited
def UpperCamelCase ( self , UpperCAmelCase=-1 ):
"""simple docstring"""
if c == -1:
_UpperCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCAmelCase , UpperCAmelCase , 1 )
def UpperCamelCase ( self , UpperCAmelCase=-2 ):
"""simple docstring"""
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return len(self.graph[u] )
def UpperCamelCase ( self , UpperCAmelCase=-2 ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = s
_UpperCAmelCase = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(UpperCAmelCase ) == 0:
return sorted_nodes
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(UpperCAmelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(UpperCAmelCase ) == 0:
return list(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(UpperCAmelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(UpperCAmelCase ) == 0:
return False
def UpperCamelCase ( self , UpperCAmelCase=-2 , UpperCAmelCase=-1 ):
"""simple docstring"""
_UpperCAmelCase = time()
self.dfs(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = time()
return end - begin
def UpperCamelCase ( self , UpperCAmelCase=-2 ):
"""simple docstring"""
_UpperCAmelCase = time()
self.bfs(UpperCAmelCase )
_UpperCAmelCase = time()
return end - begin
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = {}
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1 ):
"""simple docstring"""
if self.graph.get(UpperCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(UpperCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_UpperCAmelCase = [[w, u]]
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
if self.graph.get(UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCAmelCase )
# the other way round
if self.graph.get(UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase=-2 , UpperCAmelCase=-1 ):
"""simple docstring"""
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(UpperCAmelCase ) == 0:
return visited
def UpperCamelCase ( self , UpperCAmelCase=-1 ):
"""simple docstring"""
if c == -1:
_UpperCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCAmelCase , UpperCAmelCase , 1 )
def UpperCamelCase ( self , UpperCAmelCase=-2 ):
"""simple docstring"""
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return len(self.graph[u] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(UpperCAmelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(UpperCAmelCase ) == 0:
return list(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(UpperCAmelCase )
visited.append(UpperCAmelCase )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(UpperCAmelCase ) != 0:
_UpperCAmelCase = stack[len(UpperCAmelCase ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(UpperCAmelCase )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(UpperCAmelCase ) == 0:
return False
def UpperCamelCase ( self ):
"""simple docstring"""
return list(self.graph )
def UpperCamelCase ( self , UpperCAmelCase=-2 , UpperCAmelCase=-1 ):
"""simple docstring"""
_UpperCAmelCase = time()
self.dfs(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = time()
return end - begin
def UpperCamelCase ( self , UpperCAmelCase=-2 ):
"""simple docstring"""
_UpperCAmelCase = time()
self.bfs(UpperCAmelCase )
_UpperCAmelCase = time()
return end - begin
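
# NOTE: as obfuscated, every method of both classes (and the two classes
# themselves) shares a single name, so only the last definition survives and
# calls such as self.add_pair or self.dfs would fail at runtime. A sketch of
# the intended usage, assuming the original distinct names (add_pair, dfs,
# bfs, cycle_nodes, ...) are restored; the class name Graph is hypothetical:
#
#     g = Graph()
#     for u, v in [(0, 1), (1, 2), (2, 0), (2, 3)]:
#         g.add_pair(u, v)
#     g.dfs(0, 3)       # -> [0, 1, 2, 3]
#     g.bfs(0)          # breadth-first order from vertex 0
#     g.cycle_nodes()   # nodes on the 0 -> 1 -> 2 -> 0 cycle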
| 39 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase ) == num_samples
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , use_memory_efficient_attention=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
        images_eff = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
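
# The replicate/shard idiom these tests rely on, in isolation: `shard`
# reshapes a global batch of size (device_count * per_device, ...) into
# (device_count, per_device, ...), and `replicate` copies the params onto
# every device so a pmapped call can consume both. A minimal sketch using
# the names already imported above:
#
#     n = jax.device_count()
#     batch = jnp.zeros((n * 2, 8))                       # global batch
#     sharded = shard(batch)                              # -> (n, 2, 8)
#     rngs = jax.random.split(jax.random.PRNGKey(0), n)   # one key per device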
| 39 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    """simple docstring"""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
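
# Worked checks for the fixed function: 6 and 28 equal the sum of their
# proper divisors (they are perfect numbers), while 12 is abundant:
if __name__ == "__main__":
    assert sum_of_proper_divisors(6) == 1 + 2 + 3
    assert sum_of_proper_divisors(28) == 1 + 2 + 4 + 7 + 14
    assert sum_of_proper_divisors(12) == 16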
| 112 | 0 |
lowerCAmelCase__ = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
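
# Round-trip sketch for the fixed pair above (Bacon's biliteral cipher,
# using this file's 26-letter variant of the code table):
if __name__ == "__main__":
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode("AABBBAABAAABABAABABAABBAB") == "hello"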
| 11 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
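
# Usage sketch against the upstream OwlViTProcessor this class corresponds to
# (the mangled signatures above repeat parameter names and do not parse);
# assumes hub access and a reachable test image:
#
#     from transformers import OwlViTProcessor
#     from PIL import Image
#     import requests
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                        images=image, return_tensors="pt")
#     print(inputs.keys())  # input_ids, attention_mask, pixel_values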
| 11 | 1 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'''{artifact_name}.zip''')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
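
# Usage sketch for the restored helpers; the artifact name below is a
# hypothetical placeholder, and a GitHub token is read from the environment:
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["single-gpu_run_all_tests_gpu_test_reports"],  # placeholder
        output_dir=".",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    print(sorted(reports))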
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : Union[str, Any]=400 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=1 / 255 , __lowerCamelCase : Optional[Any]=True , ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[int] = batch_size
lowerCamelCase__ : str = num_channels
lowerCamelCase__ : Optional[Any] = min_resolution
lowerCamelCase__ : List[Any] = max_resolution
lowerCamelCase__ : int = do_resize
lowerCamelCase__ : Union[str, Any] = size
lowerCamelCase__ : Union[str, Any] = do_normalize
lowerCamelCase__ : int = image_mean
lowerCamelCase__ : Optional[int] = image_std
lowerCamelCase__ : List[Any] = do_rescale
lowerCamelCase__ : Optional[Any] = rescale_factor
lowerCamelCase__ : Any = do_pad
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str]=False ):
'''simple docstring'''
if not batched:
lowerCamelCase__ : Tuple = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : List[Any] = int(self.size["shortest_edge"] * h / w )
lowerCamelCase__ : Optional[Any] = self.size["shortest_edge"]
elif w > h:
lowerCamelCase__ : List[Any] = self.size["shortest_edge"]
lowerCamelCase__ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase__ : Optional[int] = self.size["shortest_edge"]
lowerCamelCase__ : Union[str, Any] = self.size["shortest_edge"]
else:
lowerCamelCase__ : Dict = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : str = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : str = DetaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
lowerCamelCase__ : List[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase__ : Dict = json.loads(f.read() )
lowerCamelCase__ : Any = {"image_id": 39769, "annotations": target}
# encode them
lowerCamelCase__ : Union[str, Any] = DetaImageProcessor()
lowerCamelCase__ : List[str] = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase__ : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCamelCase__ : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
lowerCamelCase__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
lowerCamelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCamelCase__ : Optional[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
lowerCamelCase__ : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
lowerCamelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
lowerCamelCase__ : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase__ : Tuple = json.loads(f.read() )
lowerCamelCase__ : List[str] = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
lowerCamelCase__ : Union[str, Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase__ : Tuple = DetaImageProcessor(format="coco_panoptic" )
lowerCamelCase__ : Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase__ : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
lowerCamelCase__ : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCamelCase__ : List[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
lowerCamelCase__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
lowerCamelCase__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCamelCase__ : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
lowerCamelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
lowerCamelCase__ : Union[str, Any] = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
lowerCamelCase__ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
lowerCamelCase__ : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 184 | 0 |
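# Editor's note: a minimal sketch of the box conversion the DETA test above relies
# on. DETR-style image processors turn COCO boxes ([top-left x, top-left y, width,
# height], in absolute pixels) into center-format boxes normalized by the image
# size, which is why the expected `boxes` values above all lie in [0, 1]. The
# helper name is hypothetical.
def coco_xywh_to_normalized_cxcywh(box, image_width, image_height):
    x, y, w, h = box
    cx = (x + w / 2) / image_width
    cy = (y + h / 2) / image_height
    return [cx, cy, w / image_width, h / image_height]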
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum ):.1f}" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f"{round(-1 * my_sec_sum ):.1f}" )
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def analyze_text( text: str ) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
    main()
| 210 |
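# Editor's note: a quick sanity check of the first-order entropy computed above,
# H = -sum(p * log2(p)). A uniform distribution over four symbols carries exactly
# 2 bits per symbol:
import math

probs = [0.25, 0.25, 0.25, 0.25]
assert -sum(p * math.log2(p) for p in probs) == 2.0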
import random
from typing import Any
def fisher_yates_shuffle( data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 210 | 1 |
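# Editor's note: the shuffle above applies len(data) random transpositions, which
# randomizes the list but is not the textbook Fisher-Yates algorithm. The classic
# (Durstenfeld) version below walks the list once, swapping each position with a
# uniformly chosen index at or below it, giving an unbiased permutation in O(n):
import random

def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i, inclusive on both ends
        data[i], data[j] = data[j], data[i]
    return data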
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = DonutImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_thumbnail' ) )
        self.assertTrue(hasattr(image_processing , 'do_align_long_axis' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
pass
@is_flaky()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
# Initialize image_processing
lowercase__: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
lowercase__: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__: int = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
# Initialize image_processing
lowercase__: int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
lowercase__: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__: Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
# Initialize image_processing
lowercase__: str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase__: Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__: str = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 196 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = "relu" , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowercase__: Any = nn.Convad(
lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , )
lowercase__: str = nn.BatchNormad(lowerCAmelCase__ )
lowercase__: Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: List[str] = self.convolution(lowerCAmelCase__ )
lowercase__: Optional[Any] = self.normalization(lowerCAmelCase__ )
lowercase__: Union[str, Any] = self.activation(lowerCAmelCase__ )
return hidden_state
class RegNetEmbeddings( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase__: Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase__: Dict = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
lowercase__: Optional[int] = self.embedder(lowerCAmelCase__ )
return hidden_state
class RegNetShortCut( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 2 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowercase__: Optional[Any] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase__: Union[str, Any] = nn.BatchNormad(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Tensor:
'''simple docstring'''
lowercase__: Any = self.convolution(lowerCAmelCase__ )
lowercase__: str = self.normalization(lowerCAmelCase__ )
return hidden_state
class RegNetSELayer( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase__: Any = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__: str = nn.Sequential(
nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
# b c h w -> b c 1 1
lowercase__: str = self.pooler(lowerCAmelCase__ )
lowercase__: List[str] = self.attention(lowerCAmelCase__ )
lowercase__: List[Any] = hidden_state * attention
return hidden_state
class RegNetXLayer( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase__: str = in_channels != out_channels or stride != 1
lowercase__: Optional[int] = max(1 , out_channels // config.groups_width )
lowercase__: Union[str, Any] = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowercase__: Dict = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
lowercase__: Tuple = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Dict = hidden_state
lowercase__: Union[str, Any] = self.layer(lowerCAmelCase__ )
lowercase__: int = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
lowercase__: Optional[int] = self.activation(lowerCAmelCase__ )
return hidden_state
class RegNetYLayer( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase__: Optional[int] = in_channels != out_channels or stride != 1
lowercase__: List[str] = max(1 , out_channels // config.groups_width )
lowercase__: Any = (
RegNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowercase__: str = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , groups=lowerCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(lowerCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
lowercase__: Union[str, Any] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
lowercase__: Optional[Any] = hidden_state
lowercase__: Optional[int] = self.layer(lowerCAmelCase__ )
lowercase__: str = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
lowercase__: Optional[int] = self.activation(lowerCAmelCase__ )
return hidden_state
class RegNetStage( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , ) -> Tuple:
'''simple docstring'''
super().__init__()
lowercase__: Optional[int] = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
lowercase__: str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: str = self.layers(lowerCAmelCase__ )
return hidden_state
class RegNetEncoder( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase__: int = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
lowercase__: List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__: Optional[Any] = hidden_states + (hidden_state,)
lowercase__: List[Any] = stage_module(lowerCAmelCase__ )
if output_hidden_states:
lowercase__: Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
class RegNetPreTrainedModel( PreTrainedModel ):
__lowercase : Dict = RegNetConfig
__lowercase : Dict = 'regnet'
__lowercase : str = 'pixel_values'
__lowercase : List[str] = True
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: Any = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel( RegNetPreTrainedModel ):
def __init__( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowercase__: Tuple = config
lowercase__: List[str] = RegNetEmbeddings(lowerCAmelCase__ )
lowercase__: Optional[int] = RegNetEncoder(lowerCAmelCase__ )
lowercase__: Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowercase__: List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Any = self.embedder(lowerCAmelCase__ )
lowercase__: List[Any] = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
lowercase__: Optional[Any] = encoder_outputs[0]
lowercase__: Optional[int] = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification( RegNetPreTrainedModel ):
def __init__( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowercase__: Dict = config.num_labels
lowercase__: Dict = RegNetModel(lowerCAmelCase__ )
# classification head
lowercase__: str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
lowercase__: str = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Optional[int] = self.regnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
lowercase__: Dict = outputs.pooler_output if return_dict else outputs[1]
lowercase__: List[str] = self.classifier(lowerCAmelCase__ )
lowercase__: Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__: Dict = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__: Optional[int] = 'single_label_classification'
else:
lowercase__: Tuple = 'multi_label_classification'
if self.config.problem_type == "regression":
lowercase__: List[Any] = MSELoss()
if self.num_labels == 1:
lowercase__: Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__: int = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
lowercase__: Dict = CrossEntropyLoss()
lowercase__: Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__: List[Any] = BCEWithLogitsLoss()
lowercase__: Any = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
lowercase__: int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
| 196 | 1 |
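# Editor's note: a minimal standalone sketch of the squeeze-and-excitation attention
# used by RegNetSELayer above: global-average-pool to 1x1, bottleneck through two
# 1x1 convolutions gated by a sigmoid, then rescale the input channels.
import torch
from torch import nn

class SqueezeExcite(nn.Module):
    def __init__(self, channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # b c h w -> b c 1 1 channel weights, broadcast back over h and w
        return hidden_state * self.attention(self.pooler(hidden_state))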
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
for param in module.parameters():
        param.requires_grad = False
def UpperCamelCase ( ):
'''simple docstring'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
    fig = plt.imshow(lowerCAmelCase__ )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
plt.show()
def UpperCamelCase ( ):
'''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 350 |
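# Editor's note: a minimal usage sketch for the helpers above, assuming readable
# aliases (freeze_params, get_device, show_image and get_timestamp are hypothetical
# names for the obfuscated functions in this entry):
#
#     device = get_device()
#     model = torch.nn.Linear(4, 2).to(device)
#     freeze_params(model)      # disable gradients, e.g. for feature extraction
#     print(get_timestamp(), device)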
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    '''simple docstring'''
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
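# Editor's note: why log(A) - B / A above is the entropy of softmax(x). With
# A = sum_j exp(x_j) and p_i = exp(x_i) / A,
#   H(p) = -sum_i p_i * log(p_i)
#        = -sum_i (exp(x_i) / A) * (x_i - log(A))
#        = log(A) - (sum_i x_i * exp(x_i)) / A
#        = log(A) - B / A
# (entropy in nats, since torch.log is the natural logarithm).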
class DeeBertEncoder( nn.Module ):
    def __init__( self ,config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def A__ ( self ,A__):
if (type(A__) is float) or (type(A__) is int):
for i in range(len(self.early_exit_entropy)):
lowercase = x
else:
lowercase = x
def A__ ( self ,A__):
lowercase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def A__ ( self ,A__ ,A__=None ,A__=None ,A__=None ,A__=None ,):
lowercase = ()
lowercase = ()
lowercase = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
lowercase = layer_module(
A__ ,A__ ,head_mask[i] ,A__ ,A__)
lowercase = layer_outputs[0]
if self.output_attentions:
lowercase = all_attentions + (layer_outputs[1],)
lowercase = (hidden_states,)
if self.output_hidden_states:
lowercase = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowercase = current_outputs + (all_attentions,)
lowercase = self.highway[i](A__)
# logits, pooled_output
if not self.training:
lowercase = highway_exit[0]
lowercase = entropy(A__)
lowercase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowercase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowercase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(A__ ,i + 1)
else:
lowercase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
lowercase = (hidden_states,)
if self.output_hidden_states:
lowercase = outputs + (all_hidden_states,)
if self.output_attentions:
lowercase = outputs + (all_attentions,)
lowercase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
    def __init__( self ,config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
def A__ ( self):
self.encoder.init_highway_pooler(self.pooler)
def A__ ( self):
return self.embeddings.word_embeddings
def A__ ( self ,A__):
lowercase = value
def A__ ( self ,A__):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(A__)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
lowercase = input_ids.size()
elif inputs_embeds is not None:
lowercase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
lowercase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase = torch.ones(A__ ,device=A__)
if encoder_attention_mask is None:
lowercase = torch.ones(A__ ,device=A__)
if token_type_ids is None:
lowercase = torch.zeros(A__ ,dtype=torch.long ,device=A__)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase = self.get_extended_attention_mask(A__ ,A__ ,A__)
        # If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowercase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowercase = encoder_attention_mask[:, None, None, :]
lowercase = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
lowercase = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase = self.get_head_mask(A__ ,self.config.num_hidden_layers)
lowercase = self.embeddings(
input_ids=A__ ,position_ids=A__ ,token_type_ids=A__ ,inputs_embeds=A__)
lowercase = self.encoder(
A__ ,attention_mask=A__ ,head_mask=A__ ,encoder_hidden_states=A__ ,encoder_attention_mask=A__ ,)
lowercase = encoder_outputs[0]
lowercase = self.pooler(A__)
lowercase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    def __init__( self ,message ,exit_layer):
        self.message = message
        self.exit_layer = exit_layer # start from 1!
class BertHighway( nn.Module ):
    def __init__( self ,config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size ,config.num_labels)
def A__ ( self ,A__):
# Pooler
lowercase = encoder_outputs[0]
lowercase = self.pooler(A__)
# "return" pooler_output
# BertModel
lowercase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowercase = bmodel_output[1]
lowercase = self.dropout(A__)
lowercase = self.classifier(A__)
return logits, pooled_output
@add_start_docstrings(
    '''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
    def __init__( self ,config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size ,self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,A__=-1 ,A__=False ,):
lowercase = self.num_layers
try:
lowercase = self.bert(
A__ ,attention_mask=A__ ,token_type_ids=A__ ,position_ids=A__ ,head_mask=A__ ,inputs_embeds=A__ ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowercase = outputs[1]
lowercase = self.dropout(A__)
lowercase = self.classifier(A__)
lowercase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowercase = e.message
lowercase = e.exit_layer
lowercase = outputs[0]
if not self.training:
lowercase = entropy(A__)
lowercase = []
lowercase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowercase = MSELoss()
lowercase = loss_fct(logits.view(-1) ,labels.view(-1))
else:
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
# work with highway exits
lowercase = []
for highway_exit in outputs[-1]:
lowercase = highway_exit[0]
if not self.training:
highway_logits_all.append(A__)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
lowercase = MSELoss()
lowercase = loss_fct(highway_logits.view(-1) ,labels.view(-1))
else:
lowercase = CrossEntropyLoss()
lowercase = loss_fct(highway_logits.view(-1 ,self.num_labels) ,labels.view(-1))
highway_losses.append(A__)
if train_highway:
lowercase = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
lowercase = (loss,) + outputs
if not self.training:
lowercase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowercase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 97 | 0 |
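# Editor's note: a minimal sketch of the DeeBERT early-exit control flow defined in
# the entry above. At inference, each layer's highway classifier produces logits; if
# the entropy of those logits falls below the layer's threshold, the encoder raises
# HighwayException and the caller returns that layer's prediction without running the
# remaining layers. `set_early_exit_entropy` is the threshold setter used in the
# upstream DeeBERT example scripts; in this entry it appears under an obfuscated name.
#
#     model.bert.encoder.set_early_exit_entropy(0.5)  # one threshold for every layer
#     try:
#         outputs = model.bert(input_ids, attention_mask=attention_mask)
#     except HighwayException as e:
#         outputs = e.message        # outputs assembled at the exiting layer
#         exit_layer = e.exit_layer  # 1-based index of the layer that exited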
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
def a_ ( self ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
def a_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
snake_case = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
snake_case = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Test that the tokenization is identical on unicode of space type
snake_case = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Test that the tokenization is identical on unicode of line break type
snake_case = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def a_ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
snake_case = F'''{text_of_1_token} {text_of_1_token}'''
snake_case = self.rust_tokenizer_class.from_pretrained(
__snake_case , use_fast=__snake_case , )
snake_case = tokenizer_r(__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__snake_case ) + 1, len(__snake_case ) + 1 + len(__snake_case )) , )
snake_case = F''' {text}'''
snake_case = self.rust_tokenizer_class.from_pretrained(
__snake_case , use_fast=__snake_case , )
snake_case = tokenizer_r(__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__snake_case ) + 1, 1 + len(__snake_case ) + 1 + len(__snake_case )) , )
def a_ ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(__snake_case ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def a_ ( self ):
super().test_tokenization_python_rust_equals()
def a_ ( self ):
# CLIP always lower cases letters
pass
| 127 |
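# Editor's note: a minimal sketch of how the toy merges file in the setUp above
# drives BPE. Each merges line names a symbol pair, and pairs listed earlier merge
# first; applying ['l o', 'lo w</w>', 'e r</w>'] to 'lower' yields
# ['lo', 'w', 'er</w>'], exactly the expectation asserted in the tokenizer test.
def apply_bpe(symbols: list, merges: list) -> list:
    # symbols: e.g. ['l', 'o', 'w', 'e', 'r</w>']; merges: ordered (a, b) pairs
    while True:
        candidates = [
            (rank, i)
            for rank, (a, b) in enumerate(merges)
            for i in range(len(symbols) - 1)
            if (symbols[i], symbols[i + 1]) == (a, b)
        ]
        if not candidates:
            return symbols
        _, i = min(candidates)  # apply the highest-priority pair, leftmost first
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]

assert apply_bpe(['l', 'o', 'w', 'e', 'r</w>'], [('l', 'o'), ('lo', 'w</w>'), ('e', 'r</w>')]) == ['lo', 'w', 'er</w>']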
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t ):
    """simple docstring"""
    t = int(t )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def html_progress_bar(value ,total ,prefix ,label ,width=3_00 ):
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table(items ):
    """simple docstring"""
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = F'''{elt:.6f}''' if isinstance(elt ,float ) else str(elt )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar:
    """simple docstring"""
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix = None , leave = True , parent = None , width = 3_0_0 , ):
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
def a_ ( self , __snake_case , __snake_case = False , __snake_case = None ):
snake_case = value
if comment is not None:
snake_case = comment
if self.last_value is None:
snake_case = snake_case = time.time()
snake_case = snake_case = value
snake_case = snake_case = None
snake_case = self.warmup
snake_case = 1
self.update_bar(__snake_case )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
snake_case = time.time()
snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
snake_case = self.elapsed_time / (value - self.start_value)
else:
snake_case = None
if value >= self.total:
snake_case = self.total
snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__snake_case )
snake_case = value
snake_case = current_time
if self.average_time_per_item is None:
snake_case = 1
else:
snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def a_ ( self , __snake_case , __snake_case=None ):
snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case )
if self.elapsed_time is None:
snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def a_ ( self ):
snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def a_ ( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker( NotebookProgressBar ):
    """simple docstring"""
    def __init__( self , num_steps , column_names=None ):
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
def a_ ( self ):
snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def a_ ( self , __snake_case ):
if self.inner_table is None:
snake_case = [list(values.keys() ), list(values.values() )]
else:
snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__snake_case )
snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def a_ ( self , __snake_case , __snake_case=None , __snake_case=3_0_0 ):
snake_case = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case )
return self.child_bar
def a_ ( self ):
snake_case = None
self.display()
class NotebookProgressCallback( TrainerCallback ):
    """simple docstring"""
    def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
snake_case = 0
snake_case = 0
snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
snake_case = NotebookTrainingTracker(state.max_steps , __snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
snake_case = False
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
if not has_length(__snake_case ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
snake_case = self.training_tracker.add_child(len(__snake_case ) )
else:
snake_case = NotebookProgressBar(len(__snake_case ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
if self.prediction_bar is not None:
self.prediction_bar.close()
snake_case = None
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
snake_case = state.global_step
self.training_tracker.write_line(__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
if self.training_tracker is not None:
snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
snake_case = log['''loss''']
break
if self.first_column == "Epoch":
snake_case = int(state.epoch )
else:
snake_case = state.global_step
snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
snake_case = re.sub(R'''\_loss$''' , '''''' , __snake_case )
snake_case = metrics.pop('''total_flos''' , __snake_case )
snake_case = metrics.pop('''epoch''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __snake_case )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
snake_case = v
else:
snake_case = k.split('''_''' )
snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
snake_case = v
self.training_tracker.write_line(__snake_case )
self.training_tracker.remove_child()
snake_case = None
# Evaluation takes a long time so we should force the next update.
snake_case = True
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__snake_case )
snake_case = None
| 127 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize a string into sentencepiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
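
# Usage sketch (added for illustration; not part of the original module):
#
#     tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#     ids = tokenizer.convert_tokens_to_ids(tokenizer._tokenize("Hello world"))
#     tokenizer.build_inputs_with_special_tokens(ids)   # [CLS] ... [SEP] around the piece ids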
| 369 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return all left- and right-truncations of ``n``, including ``n`` itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Project Euler 37: the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
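
# Added illustration: a small self-check of the functions above. The eleven
# truncatable primes form a known, finite set, and their sum is the Project
# Euler 37 answer.
def _demo_truncated_primes() -> None:
    assert compute_truncated_primes(4) == [23, 37, 53, 73]
    assert solution() == 748317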
| 248 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` on [x_start, x_end] using ``steps`` chords."""
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)

        # Increment step
        xa = xb
        fxa = fxb

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
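
# Added illustration: on a straight line the chord sum is exact, so any step
# count recovers sqrt(2) for f(x) = x on [0, 1] up to float rounding.
def _demo_line_length() -> None:
    assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9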
| 267 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
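
# Usage sketch (added; the prompt text is illustrative, not taken from the
# accelerate CLI itself):
#
#     num_machines = _ask_field(
#         "How many different machines will you use? [1]: ",
#         convert_value=int,
#         default=1,
#         error_message="Please enter an integer.",
#     )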
| 267 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 134 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
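
# Usage sketch (added; the checkpoint name is illustrative):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )   # -> BatchEncoding with input_ids, attention_mask and pixel_values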
| 134 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
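
# Note (added): with this pattern `import transformers` only registers the
# submodule names; the tokenizer file is imported on first attribute access
# through the _LazyModule instance placed into sys.modules above.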
| 132 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
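
# Usage sketch (added): the dynamic ONNX axes depend on the task and on
# type_vocab_size (0 by default, so no token_type_ids input is exported):
#
#     onnx_config = DebertaV2OnnxConfig(DebertaV2Config(), task="sequence-classification")
#     list(onnx_config.inputs)   # ['input_ids', 'attention_mask']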
| 137 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
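
# Note (added): each try/except OptionalDependencyNotAvailable block above
# registers a group of symbols only when its backend (vision, torch or tf) is
# importable, so a torch-only or CPU-only install imports this package cleanly.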
| 354 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
def _lowerCamelCase ( self : List[Any] , _a : str ):
a__: Dict =self.preprocess_text(_a )
a__: Dict =self.sp_model.encode(_a , out_type=_a )
a__: str =[]
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__: Optional[Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__: Optional[int] =cur_pieces[1:]
else:
a__: Tuple =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def _lowerCamelCase ( self : Dict , _a : Dict ):
return self.sp_model.PieceToId(_a )
def _lowerCamelCase ( self : Dict , _a : Optional[Any] ):
return self.sp_model.IdToPiece(_a )
def _lowerCamelCase ( self : Optional[Any] , _a : Tuple ):
a__: Tuple ="".join(_a ).replace(_a , " " ).strip()
return out_string
def _lowerCamelCase ( self : Optional[int] , _a : List[int] , _a : bool = False , _a : bool = None , _a : bool = True , **_a : Union[str, Any] , ):
a__: Optional[int] =kwargs.pop("use_source_tokenizer" , _a )
a__: Any =self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a__: List[str] =[]
a__: Any =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
a__: List[str] =[]
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
a__: Union[str, Any] ="".join(_a )
a__: List[Any] =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a__: Optional[int] =self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _lowerCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ):
a__: Dict =[self.sep_token_id]
a__: Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ):
a__: Any =[self.sep_token_id]
a__: List[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , _a : str , _a : Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__: List[Any] =os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
a__: Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 42 | 0 |
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Check against a table of small primes, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime of roughly ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
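
# Added illustration: composites with a small factor never reach Rabin-Miller;
# e.g. the Carmichael number 561 = 3 * 11 * 17 is rejected by the table scan.
def _demo_primality() -> None:
    assert is_prime_low_num(97)
    assert not is_prime_low_num(561)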
| 62 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 0 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return False if it was not present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Insert a few words and print the resulting tree."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
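
# Note (added): find, insert and delete each walk a single root-to-leaf path,
# so all three run in O(m) for a word of length m, independent of the number
# of stored words.
def _demo_radix() -> None:
    root = RadixNode()
    root.insert_many(["test", "team"])
    assert root.find("team") and not root.find("tea")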
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    """Configuration class for the Audio Spectrogram Transformer (AST) model."""

    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
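
# Hedged usage sketch (not part of the original file): instantiating the
# config with a non-default patch layout; 16x16 patches are laid out on a
# (num_mel_bins x max_length) spectrogram with the strides configured above.
def _example_config() -> ASTConfig:
    return ASTConfig(frequency_stride=16, time_stride=16, max_length=512)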
| 144 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the redundant usage line for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
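
# Hedged sketch (not part of the original module) of how the helpers compose:
# present numbered options, then map the numeric answer onto an enum value.
def _example_prompt():
    return _ask_options(
        "Which type of machine are you using?",
        ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
        _convert_distributed_mode,
    )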
| 306 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
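
# Hedged helper sketch (not part of the original test file): the property the
# cache test above verifies, written as a standalone check. Attention masks
# are omitted here for brevity.
def kv_cache_matches(model, prefix_ids, new_ids, atol=1e-3):
    full = model(torch.cat([prefix_ids, new_ids], dim=-1), output_hidden_states=True)["hidden_states"][0]
    past = model(prefix_ids, use_cache=True).past_key_values
    cached = model(new_ids, past_key_values=past, output_hidden_states=True)["hidden_states"][0]
    return torch.allclose(full[:, -new_ids.shape[1] :], cached, atol=atol)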
| 178 | 0 |
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
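
# Hedged sanity sketch (not part of the original file): on random inputs the
# O(n log n) count must agree with the O(n^2) brute force.
def _fuzz_check(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        arr = [random.randint(0, 9) for _ in range(random.randint(0, 20))]
        assert count_inversions_bf(arr) == count_inversions_recursive(arr)[1]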
if __name__ == "__main__":
main()
| 363 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 65 | 0 |
def get_demo_graph(index: int) -> dict[int, list[int]]:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
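
# Hedged usage sketch (not part of the original file): demo graph 0 is a
# triangle {0,1,2}, a tail 2-3-4, and a 4-cycle {5,6,7,8} attached via 2-5;
# only the tail edges and the attaching edge are bridges.
def _demo() -> None:
    assert set(compute_bridges(get_demo_graph(0))) == {(2, 3), (3, 4), (2, 5)}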
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
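
# Hedged note (not part of the original file): with `_LazyModule`, importing
# this package stays cheap; the heavy torch/tf submodules load only when an
# attribute is first touched, e.g.:
#   from transformers.models.blip import BlipProcessor  # triggers the deferred import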
| 302 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
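
# Hedged example (not part of the original file): three coins stacked on the
# root of a three-node tree take exactly two moves, one down each edge.
def _demo() -> None:
    tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(tree) == 2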
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 241 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
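
    # Hedged usage sketch (not part of the original file): the two helpers
    # above are typically wired together like so in the fast tests below:
    #   components = self.get_dummy_components()
    #   pipe = AltDiffusionPipeline(**components).to("cpu")
    #   image = pipe(**self.get_dummy_inputs("cpu")).images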
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 308 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Creates a state space tree and explores each branch with DFS;
    each element is either skipped or appended, and recursion stops at the
    end of the sequence."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
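
# Hedged note (not part of the original file): every element is either kept or
# skipped, so the state space tree has exactly 2**len(sequence) leaves.
def _expected_subsequence_count(sequence: list[Any]) -> int:
    return 2 ** len(sequence)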
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq) | 308 | 1 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
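
# Hedged worked example (not part of the original file): a single equation
# 2x = 4 hits the base case and returns [2.0].
def _demo() -> None:
    assert solve_simultaneous([[2, 4]]) == [2.0]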
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 209 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
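
    # Hedged note (not part of the original script): `get_normalized_score`
    # maps raw return onto D4RL's scale (0.0 ~ random policy, 1.0 ~ expert),
    # so a running average like the one below is comparable across envs.
    # mean_score = total_score / max(t, 1)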
| 209 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
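
# Hedged sketch (not part of the original tests): the raw PyTorch pattern that
# `accelerator.accumulate()` abstracts -- skip the gradient all-reduce on
# accumulation micro-batches and sync on the last one of each group.
def _manual_accumulation_sketch(ddp_model, dataloader, accum=2):
    import contextlib

    for i, (input, target) in enumerate(dataloader):
        sync = (i + 1) % accum == 0
        ctx = contextlib.nullcontext() if sync else ddp_model.no_sync()
        with ctx:
            loss = F.mse_loss(ddp_model(input), target)
            loss.backward()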
if __name__ == "__main__":
main()
| 328 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using the math module."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
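
# Hedged note (not part of the original file): the float-based check can
# misclassify very large squares, while the binary search stays exact in
# integer arithmetic.
def _demo() -> None:
    assert perfect_square_binary_search(16)
    assert not perfect_square_binary_search(15)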
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataloader)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
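
# Hedged sketch (not part of the original script) of the filtering rule inside
# `finetune`: keep a context for backprop only while the secondary learner's
# predicted information gain stays above the (decaying) threshold.
def _keep_context(predicted_q, threshold, global_step):
    if global_step >= 10:
        threshold = -1  # the threshold decays after the warm-up batches
    return predicted_q >= threshold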
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.", )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True)

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )
if __name__ == "__main__":
main()
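# Example invocation (a sketch; the script filename and local data paths are
# assumptions — only flags defined by the argparse setup above are used):
# python run_igf.py \
#     --context_len 32 --max_steps 1000 --batch_size 16 --eval_interval 10 \
#     --finetuned_model_name gpt2_finetuned.pt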
| 363 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
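# Minimal usage sketch (assumes torch and sentencepiece are installed so the guarded
# imports above resolve; "google/rembert" is the published checkpoint on the Hub):
# from transformers import RemBertTokenizer, RemBertModel
# tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
# model = RemBertModel.from_pretrained("google/rembert")
# outputs = model(**tokenizer("Hello world", return_tensors="pt"))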
| 167 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : List[Any] = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
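# Minimal usage sketch (assumes torch is installed; the checkpoint name
# "junnyu/roformer_chinese_base" is an assumption, used only for illustration):
# from transformers import RoFormerTokenizer, RoFormerModel
# tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
# model = RoFormerModel.from_pretrained("junnyu/roformer_chinese_base")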
| 258 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
if latents is None:
A = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
A = latents.to(_lowerCAmelCase )
A = latents * scheduler.init_noise_sigma
return latents
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device(F"""cuda:{gpu_id}""" )
A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
A = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
A = self.image_processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
A = self.image_encoder(_lowerCAmelCase )["""last_hidden_state"""]
A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__(self : List[Any] , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
A = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
A = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A = len(_lowerCAmelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}""" )
A = self._execution_device
A = batch_size * num_images_per_prompt
A = guidance_scale > 1.0
A = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
A = self.scheduler.timesteps
A = self.prior.config.num_embeddings
A = self.prior.config.embedding_dim
A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
A = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
A , A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
# `do_classifier_free_guidance` is a bool (guidance_scale > 1.0), so test its truth value
if do_classifier_free_guidance:
A , A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
A = []
for i, latent in enumerate(_lowerCAmelCase ):
A = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
A = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
A = images.cpu().numpy()
if output_type == "pil":
A = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 258 | 1 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict ) -> str:
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = {}
def lowercase ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str:
if vertex not in self.adjacency:
__lowerCAmelCase = {}
self.num_vertices += 1
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> int:
self.add_vertex(SCREAMING_SNAKE_CASE_ )
self.add_vertex(SCREAMING_SNAKE_CASE_ )
if head == tail:
return
__lowerCAmelCase = weight
__lowerCAmelCase = weight
def lowercase ( self : str ) -> Tuple:
__lowerCAmelCase = self.get_edges()
for edge in edges:
__lowerCAmelCase = edge
edges.remove((tail, head, weight) )
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
__lowerCAmelCase = list(edges[i] )
edges.sort(key=lambda lowerCAmelCase_ : e[2] )
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__lowerCAmelCase = edges[i][2] + 1
for edge in edges:
__lowerCAmelCase = edge
__lowerCAmelCase = weight
__lowerCAmelCase = weight
def __str__( self : Optional[int] ) -> str:
__lowerCAmelCase = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
__lowerCAmelCase = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def lowercase ( self : str ) -> Optional[int]:
return self.adjacency.keys()
@staticmethod
def lowercase ( lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=None ) -> Optional[int]:
__lowerCAmelCase = Graph()
if vertices is None:
__lowerCAmelCase = []
if edges is None:
__lowerCAmelCase = []
for vertex in vertices:
g.add_vertex(SCREAMING_SNAKE_CASE_ )
for edge in edges:
g.add_edge(*SCREAMING_SNAKE_CASE_ )
return g
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int ) -> Any:
__lowerCAmelCase = {}
__lowerCAmelCase = {}
def __len__( self : Tuple ) -> List[Any]:
return len(self.parent )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
if item in self.parent:
return self.find(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = item
__lowerCAmelCase = 0
return item
def lowercase ( self : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
if item not in self.parent:
return self.make_set(SCREAMING_SNAKE_CASE_ )
if item != self.parent[item]:
__lowerCAmelCase = self.find(self.parent[item] )
return self.parent[item]
def lowercase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> Optional[int]:
__lowerCAmelCase = self.find(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = self.find(SCREAMING_SNAKE_CASE_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__lowerCAmelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
__lowerCAmelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__lowerCAmelCase = roota
return roota
return None
@staticmethod
def lowercase ( lowerCAmelCase_ : int ) -> Tuple:
__lowerCAmelCase = graph.num_vertices
__lowerCAmelCase = Graph.UnionFind()
__lowerCAmelCase = []
while num_components > 1:
__lowerCAmelCase = {}
for vertex in graph.get_vertices():
__lowerCAmelCase = -1
__lowerCAmelCase = graph.get_edges()
for edge in edges:
__lowerCAmelCase = edge
edges.remove((tail, head, weight) )
for edge in edges:
__lowerCAmelCase = edge
__lowerCAmelCase = union_find.find(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = union_find.find(SCREAMING_SNAKE_CASE_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__lowerCAmelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__lowerCAmelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__lowerCAmelCase = cheap_edge[vertex]
if union_find.find(SCREAMING_SNAKE_CASE_ ) != union_find.find(SCREAMING_SNAKE_CASE_ ):
union_find.union(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
mst_edges.append(cheap_edge[vertex] )
__lowerCAmelCase = num_components - 1
__lowerCAmelCase = Graph.build(edges=SCREAMING_SNAKE_CASE_ )
return mst
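# Usage sketch (the style pass renamed both classes above to `_UpperCAmelCase`; in the
# original Boruvka implementation they are `Graph` and its nested `Graph.UnionFind`,
# referenced as such inside the MST method itself — the entry-point name
# `boruvka_mst` below is an assumption):
# g = Graph.build(edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)])
# mst = Graph.boruvka_mst(g)
# print(mst)  # prints each kept edge as "head -> tail == weight"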
| 366 |
import mpmath # for roots of unity
import numpy as np
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None ) -> List[Any]:
# Input as list
__lowerCAmelCase = list(poly_a or [0] )[:]
__lowerCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__lowerCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
__lowerCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
__lowerCAmelCase = int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
__lowerCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
__lowerCAmelCase = self.__multiply()
def lowercase ( self : Optional[int] , lowerCAmelCase_ : str ) -> Optional[int]:
__lowerCAmelCase = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(lowerCAmelCase_ ) <= 1:
return dft[0]
# Fold the transform in half repeatedly, combining paired rows with roots of unity
__lowerCAmelCase = self.c_max_length // 2
while next_ncol > 0:
__lowerCAmelCase = [[] for i in range(lowerCAmelCase_ )]
__lowerCAmelCase = self.root**next_ncol
# First half of next step
__lowerCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowerCAmelCase_ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
__lowerCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowerCAmelCase_ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
__lowerCAmelCase = new_dft
__lowerCAmelCase = next_ncol // 2
return dft[0]
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = self.__dft('A' )
__lowerCAmelCase = self.__dft('B' )
__lowerCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
__lowerCAmelCase = 2
while next_ncol <= self.c_max_length:
__lowerCAmelCase = [[] for i in range(lowerCAmelCase_ )]
__lowerCAmelCase = self.root ** (next_ncol // 2)
__lowerCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
__lowerCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
__lowerCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> int:
# enumerate yields (index, value) pairs, so unpack as (i, coef)
__lowerCAmelCase = 'A = ' + ' + '.join(
f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A] ) )
__lowerCAmelCase = 'B = ' + ' + '.join(
f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B] ) )
__lowerCAmelCase = 'A*B = ' + ' + '.join(
f"""{coef}*x^{i}""" for i, coef in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
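# Worked example (a sketch; the multiplier class above was renamed `_UpperCAmelCase`
# by the style pass — the original class name `FFT` is an assumption):
# (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2
# product = FFT(poly_a=[1, 2], poly_b=[3, 4])
# print(product)  # the "A*B = ..." line shows the coefficients 3, 10, 8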
| 207 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
a_ =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a_ ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a_ ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
lowerCAmelCase__ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
lowerCAmelCase__ = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
lowerCAmelCase__ = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
lowerCAmelCase__ = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
lowerCAmelCase__ = text_classifier("This is great !" , return_all_scores=lowerCAmelCase__ )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
lowerCAmelCase__ = text_classifier("This is great !" , return_all_scores=lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
lowerCAmelCase__ = text_classifier(["This is great !", "Something else"] , return_all_scores=lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
lowerCAmelCase__ = text_classifier(["This is great !", "Something else"] , return_all_scores=lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
import torch
lowerCAmelCase__ = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
lowerCAmelCase__ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
lowerCAmelCase__ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = pipeline("text-classification" )
lowerCAmelCase__ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "POSITIVE", "score": 1.0}] )
lowerCAmelCase__ = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
lowerCAmelCase__ = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = pipeline("text-classification" , framework="tf" )
lowerCAmelCase__ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "POSITIVE", "score": 1.0}] )
lowerCAmelCase__ = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
lowerCAmelCase__ = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": "POSITIVE", "score": 0.988}] )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Any:
'''simple docstring'''
lowerCAmelCase__ = TextClassificationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCAmelCase__ = "HuggingFace is in"
lowerCAmelCase__ = text_classifier(lowerCAmelCase__ )
self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
lowerCAmelCase__ = ["HuggingFace is in ", "Paris is in France"]
lowerCAmelCase__ = text_classifier(lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )}, {"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowerCAmelCase__ = text_classifier(lowerCAmelCase__ , top_k=lowerCAmelCase__ )
lowerCAmelCase__ = len(model.config.id2label.values() )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [[{"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )}] * N, [{"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )}] * N] , )
lowerCAmelCase__ = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
lowerCAmelCase__ = text_classifier(lowerCAmelCase__ )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to tell that the pair is not being used properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead because it produced incorrect outputs.
lowerCAmelCase__ = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(lowerCAmelCase__ ):
text_classifier(lowerCAmelCase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCAmelCase__ = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"label": ANY(lowerCAmelCase__ ), "score": ANY(lowerCAmelCase__ )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 340 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''instructblip_vision_model'''
def __init__( self : str , lowerCAmelCase__ : Dict=1_4_0_8 , lowerCAmelCase__ : int=6_1_4_4 , lowerCAmelCase__ : List[str]=3_9 , lowerCAmelCase__ : int=1_6 , lowerCAmelCase__ : Tuple=2_2_4 , lowerCAmelCase__ : Tuple=1_4 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Union[str, Any]=1e-6 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[int]=1e-10 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Tuple = qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''instructblip_qformer'''
def __init__( self : List[str] , lowerCAmelCase__ : Union[str, Any]=3_0_5_2_2 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Optional[int]=1e-12 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Union[str, Any]="absolute" , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : int=1_4_0_8 , **lowerCAmelCase__ : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[str] = layer_norm_eps
_UpperCAmelCase : Tuple = position_embedding_type
_UpperCAmelCase : Tuple = cross_attention_frequency
_UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : Tuple = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = '''instructblip'''
UpperCamelCase_ : Dict = True
def __init__( self : Tuple , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=3_2 , **lowerCAmelCase__ : Dict ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if vision_config is None:
_UpperCAmelCase : List[str] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_UpperCAmelCase : Tuple = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_UpperCAmelCase : int = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_UpperCAmelCase : List[str] = InstructBlipVisionConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = InstructBlipQFormerConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = text_config["model_type"] if "model_type" in text_config else "opt"
_UpperCAmelCase : Optional[int] = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.text_config.tie_word_embeddings
_UpperCAmelCase : List[Any] = self.text_config.is_encoder_decoder
_UpperCAmelCase : List[str] = num_query_tokens
_UpperCAmelCase : int = self.vision_config.hidden_size
_UpperCAmelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCAmelCase : int = 1.0
_UpperCAmelCase : Dict = 0.02
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : InstructBlipVisionConfig , lowerCAmelCase__ : InstructBlipQFormerConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : List[Any] = self.qformer_config.to_dict()
_UpperCAmelCase : List[Any] = self.text_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
| 145 | 0 |
def snake_case( __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
while b:
lowercase , lowercase : Optional[int] = b, a % b
return a
def snake_case( __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
# Euclid's identity: gcd(a, b) == gcd(b, a % b)
return a if b == 0 else euclidean_gcd_recursive(b , a % b )
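# Worked trace: gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6.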
def snake_case( ) -> int:
'''simple docstring'''
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 359 |
def snake_case( __magic_name__ , __magic_name__ ) -> bool:
'''simple docstring'''
lowercase : List[Any] = len(__magic_name__ )
lowercase : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
lowercase : List[str] = True
# If the required sum is non-zero and the set is empty, no subset works -> False
for i in range(1 , required_sum + 1 ):
lowercase : str = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowercase : Optional[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
lowercase : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
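# Example (a classic instance): for arr = [3, 34, 4, 12, 5, 2] the function returns
# True for required_sum = 9 (4 + 5), and False for required_sum = 30 (no subset fits).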
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __lowercase (unittest.TestCase ):
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , ) ->int:
'''simple docstring'''
__lowerCAmelCase : List[Any] = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : Dict = image_size
__lowerCAmelCase : Optional[Any] = patch_size
__lowerCAmelCase : Dict = num_channels
__lowerCAmelCase : Union[str, Any] = is_training
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : int = hidden_size
__lowerCAmelCase : Tuple = num_hidden_layers
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = hidden_dropout_prob
__lowerCAmelCase : str = attention_probs_dropout_prob
__lowerCAmelCase : str = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2
__lowerCAmelCase : Optional[int] = num_patches + 1
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : Any = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCamelCase__ ( self , A_ , A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = FlaxViTModel(config=A_ )
__lowerCAmelCase : Tuple = model(A_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : Tuple = (self.image_size, self.image_size)
__lowerCAmelCase : Optional[Any] = (self.patch_size, self.patch_size)
__lowerCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCamelCase__ ( self , A_ , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.type_sequence_label_size
__lowerCAmelCase : Optional[Any] = FlaxViTForImageClassification(config=A_ )
__lowerCAmelCase : Any = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : Union[str, Any] = FlaxViTForImageClassification(A_ )
__lowerCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase : Dict = model(A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : int = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
), (
__lowerCAmelCase
),
) : Optional[Any] = config_and_inputs
__lowerCAmelCase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCamelCase__ ( self ) ->None:
'''simple docstring'''
__lowerCAmelCase : List[Any] = FlaxViTModelTester(self )
__lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(A_ )
__lowerCAmelCase : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
__lowerCAmelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : List[Any] = self._prepare_for_class(A_ , A_ )
__lowerCAmelCase : Optional[Any] = model_class(A_ )
@jax.jit
def model_jitted(A_ , **A_ ):
return model(pixel_values=A_ , **A_ )
with self.subTest('''JIT Enabled''' ):
__lowerCAmelCase : List[Any] = model_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__lowerCAmelCase : Any = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
__lowerCAmelCase : List[str] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A_ )
| 275 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """trajectory_transformer"""
_UpperCamelCase = ["""past_key_values"""]
_UpperCamelCase = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , A_=100 , A_=5 , A_=1 , A_=1 , A_=249 , A_=6 , A_=17 , A_=25 , A_=4 , A_=4 , A_=128 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0_006 , A_=512 , A_=0.02 , A_=1e-12 , A_=1 , A_=True , A_=1 , A_=5_0256 , A_=5_0256 , **A_ , ) ->int:
'''simple docstring'''
__lowerCAmelCase : Any = vocab_size
__lowerCAmelCase : Tuple = action_weight
__lowerCAmelCase : Tuple = reward_weight
__lowerCAmelCase : Union[str, Any] = value_weight
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : str = block_size
__lowerCAmelCase : Optional[Any] = action_dim
__lowerCAmelCase : Union[str, Any] = observation_dim
__lowerCAmelCase : Union[str, Any] = transition_dim
__lowerCAmelCase : Dict = learning_rate
__lowerCAmelCase : Any = n_layer
__lowerCAmelCase : Any = n_head
__lowerCAmelCase : Optional[int] = n_embd
__lowerCAmelCase : str = embd_pdrop
__lowerCAmelCase : Dict = attn_pdrop
__lowerCAmelCase : Optional[int] = resid_pdrop
__lowerCAmelCase : Union[str, Any] = initializer_range
__lowerCAmelCase : Optional[int] = layer_norm_eps
__lowerCAmelCase : Any = kaiming_initializer_range
__lowerCAmelCase : List[str] = use_cache
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
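# Sketch (assumes the original class name `TrajectoryTransformerConfig` for the config
# defined above): the `attribute_map` aliases the canonical names onto the GPT-style
# ones, so `hidden_size` transparently reads and writes `n_embd`:
# config = TrajectoryTransformerConfig(n_embd=256)
# assert config.hidden_size == 256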
| 275 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowercase :
"""simple docstring"""
__A = XGLMConfig
__A = {}
__A = "gelu"
def __init__(self , lowerCamelCase_ , lowerCamelCase_=14 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=0.02 , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_labels
a = vocab_size
a = d_model
a = num_hidden_layers
a = num_attention_heads
a = ffn_dim
a = activation_function
a = activation_dropout
a = attention_dropout
a = max_position_embeddings
a = initializer_range
a = None
a = 0
a = 2
a = 1
def UpperCamelCase_ (self ):
"""simple docstring"""
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = self.get_config()
a = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCamelCase_ (self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase_ , )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) ,
) = config_and_inputs
a = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__A = (TFXGLMForCausalLM,) if is_tf_available() else ()
__A = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__A = False
__A = False
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFXGLMModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFXGLMModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def UpperCamelCase_ (self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase_ (self , lowerCamelCase_=True ):
"""simple docstring"""
a = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
a = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
a = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
a = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
a = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
a = tokenizer("Today is a nice day and" , return_tensors="tf" )
a = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
a = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ , seed=[7, 0] )
a = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_ )
a = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
a = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
a = "left"
# use different length sentences to test batching
a = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
a = tokenizer(lowerCamelCase_ , return_tensors="tf" , padding=lowerCamelCase_ )
a = inputs["input_ids"]
a = model.generate(input_ids=lowerCamelCase_ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
a = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
a = model.generate(input_ids=lowerCamelCase_ , max_new_tokens=12 )
a = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
a = model.generate(input_ids=lowerCamelCase_ , max_new_tokens=12 )
a = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_ )
a = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_ )
a = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence] )
| 364 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowercase: List[Any] = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def a( A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(A , A )
_lowercase: Optional[int] = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def a( A : str ) -> Union[str, Any]:
"""simple docstring"""
a = list(s_dict.keys() )
for key in keys:
a = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a = new_key.replace(A , A )
print(f'''{key} -> {new_key}''' )
a = s_dict.pop(A )
return s_dict
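# A hedged, standalone sketch (sample key and mini-mapping are illustrative,
# not from a real checkpoint) of how the substring renaming above behaves:
# every mapping entry whose key occurs in a state-dict name is substituted.
_demo_mapping = {"blocks": "layers", "mlp.0": "fc1"}
_demo_key = "decoder.blocks.0.mlp.0.weight"
for _old, _new in _demo_mapping.items():
    if _old in _demo_key:
        _demo_key = _demo_key.replace(_old, _new)
print(_demo_key)  # -> decoder.layers.0.fc1.weight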
def a( A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
a , a = emb.weight.shape
a = nn.Linear(A , A , bias=A )
a = emb.weight.data
return lin_layer
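# A minimal sketch (names assumed, not part of the conversion) of the
# weight-tying idea implemented above: the LM head is a bias-free Linear
# layer whose weight matrix is the token-embedding matrix itself.
from torch import nn

_emb = nn.Embedding(10, 4)                       # (vocab_size, d_model)
_vocab_size, _d_model = _emb.weight.shape
_lm_head = nn.Linear(_d_model, _vocab_size, bias=False)
_lm_head.weight.data = _emb.weight.data          # share storage with the embedding
assert _lm_head.weight.data_ptr() == _emb.weight.data_ptr()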
def a( A : str , A : str ) -> bytes:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
a = os.path.basename(A )
a = url.split("/" )[-2]
a = os.path.join(A , A )
if os.path.exists(A ) and not os.path.isfile(A ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(A ):
a = open(A , "rb" ).read()
if hashlib.shaaaa(A ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(A ) as source, open(A , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=A , unit_divisor=1024 ) as loop:
while True:
a = source.read(8192 )
if not buffer:
break
output.write(A )
loop.update(len(A ) )
a = open(A , "rb" ).read()
if hashlib.shaaaa(A ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
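# A self-contained illustration (payload assumed) of the checksum step above:
# the SHA-256 digest of the downloaded bytes is compared against the hex
# segment embedded in the model URL (url.split("/")[-2] in the helper).
import hashlib

_payload = b"example bytes"
print(hashlib.sha256(_payload).hexdigest())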
def a( A : List[str] , A : Union[str, Any] ) -> str:
"""simple docstring"""
if ".pt" not in checkpoint_path:
a = _download(_MODELS[checkpoint_path] )
else:
a = torch.load(A , map_location="cpu" )
a = original_checkpoint["dims"]
a = original_checkpoint["model_state_dict"]
a = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(A )
rename_keys(A )
a = True
a = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=A , decoder_ffn_dim=A , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
a = WhisperForConditionalGeneration(A )
a , a = model.model.load_state_dict(A , strict=A )
if len(A ) > 0 and not set(A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
a = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a = proj_out_weights
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase: Dict = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowercase: List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 | 0 |
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = n
__lowerCamelCase = [None] * self.n
__lowerCamelCase = 0 # index of the first element
__lowerCamelCase = 0
__lowerCamelCase = 0
def __len__( self ) -> int:
'''simple docstring'''
return self.size
def lowercase_ ( self ) -> bool:
'''simple docstring'''
return self.size == 0
def lowercase_ ( self ) -> str:
'''simple docstring'''
return False if self.is_empty() else self.array[self.front]
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
__lowerCamelCase = data
__lowerCamelCase = (self.rear + 1) % self.n
self.size += 1
return self
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
if self.size == 0:
raise Exception('UNDERFLOW' )
__lowerCamelCase = self.array[self.front]
__lowerCamelCase = None
__lowerCamelCase = (self.front + 1) % self.n
self.size -= 1
return temp
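# A standalone sketch with descriptive names (an assumption, not part of the
# snippet above) demonstrating the same ring-buffer semantics, in particular
# the modular wrap-around of the front index.
class CircularQueue:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.array = [None] * capacity
        self.front = 0  # index of the oldest element
        self.size = 0

    def enqueue(self, item) -> None:
        if self.size == self.capacity:
            raise OverflowError("queue is full")
        self.array[(self.front + self.size) % self.capacity] = item
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise IndexError("queue is empty")
        item = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.capacity
        self.size -= 1
        return item


_q = CircularQueue(2)
_q.enqueue("a")
_q.enqueue("b")
assert _q.dequeue() == "a"
_q.enqueue("c")  # wraps around to slot 0
assert _q.dequeue() == "b" and _q.dequeue() == "c"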
| 90 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE : Tuple = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE : Any = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
def a_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def a_ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case=False , __snake_case=False , __snake_case=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
snake_case = np.array([re.sub(__snake_case , '''''' , __snake_case ) for x in predictions] )
snake_case = np.array([re.sub(__snake_case , '''''' , __snake_case ) for x in references] )
else:
snake_case = np.asarray(__snake_case )
snake_case = np.asarray(__snake_case )
if ignore_case:
snake_case = np.char.lower(__snake_case )
snake_case = np.char.lower(__snake_case )
if ignore_punctuation:
snake_case = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
snake_case = np.char.translate(__snake_case , table=__snake_case )
snake_case = np.char.translate(__snake_case , table=__snake_case )
if ignore_numbers:
snake_case = string.digits.maketrans('''''' , '''''' , string.digits )
snake_case = np.char.translate(__snake_case , table=__snake_case )
snake_case = np.char.translate(__snake_case , table=__snake_case )
snake_case = predictions == references
return {"exact_match": np.mean(__snake_case ) * 1_0_0}
| 127 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __a ( lowerCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase_= orig_key.replace("""model.""" ,"""""" )
if "norm1" in orig_key:
UpperCAmelCase_= orig_key.replace("""norm1""" ,"""attention.output.LayerNorm""" )
if "norm2" in orig_key:
UpperCAmelCase_= orig_key.replace("""norm2""" ,"""output.LayerNorm""" )
if "norm" in orig_key:
UpperCAmelCase_= orig_key.replace("""norm""" ,"""LayerNorm""" )
if "transformer" in orig_key:
UpperCAmelCase_= orig_key.split(""".""" )[0].split("""_""" )[-1]
UpperCAmelCase_= orig_key.replace(F"""transformer_{layer_num}""" ,F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase_= orig_key.replace("""mha.attn""" ,"""attention.self""" )
if "mha" in orig_key:
UpperCAmelCase_= orig_key.replace("""mha""" ,"""attention""" )
if "W_q" in orig_key:
UpperCAmelCase_= orig_key.replace("""W_q""" ,"""self.query""" )
if "W_k" in orig_key:
UpperCAmelCase_= orig_key.replace("""W_k""" ,"""self.key""" )
if "W_v" in orig_key:
UpperCAmelCase_= orig_key.replace("""W_v""" ,"""self.value""" )
if "ff1" in orig_key:
UpperCAmelCase_= orig_key.replace("""ff1""" ,"""intermediate.dense""" )
if "ff2" in orig_key:
UpperCAmelCase_= orig_key.replace("""ff2""" ,"""output.dense""" )
if "ff" in orig_key:
UpperCAmelCase_= orig_key.replace("""ff""" ,"""output.dense""" )
if "mlm_class" in orig_key:
UpperCAmelCase_= orig_key.replace("""mlm.mlm_class""" ,"""cls.predictions.decoder""" )
if "mlm" in orig_key:
UpperCAmelCase_= orig_key.replace("""mlm""" ,"""cls.predictions.transform""" )
if "cls" not in orig_key:
UpperCAmelCase_= """yoso.""" + orig_key
return orig_key
def __a ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_= orig_state_dict.pop(lowerCAmelCase_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase_= val
UpperCAmelCase_= orig_state_dict["""cls.predictions.decoder.bias"""]
UpperCAmelCase_= torch.arange(lowerCAmelCase_ ).expand((1, -1) ) + 2
return orig_state_dict
def __a ( lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= torch.load(lowerCAmelCase_ ,map_location="""cpu""" )["""model_state_dict"""]
UpperCAmelCase_= YosoConfig.from_json_file(lowerCAmelCase_ )
UpperCAmelCase_= YosoForMaskedLM(lowerCAmelCase_ )
UpperCAmelCase_= convert_checkpoint_helper(config.max_position_embeddings ,lowerCAmelCase_ )
print(model.load_state_dict(lowerCAmelCase_ ) )
model.eval()
model.save_pretrained(lowerCAmelCase_ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 356 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : pyspark.sql.DataFrame , __UpperCAmelCase : Optional[NamedSplit] = None , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "arrow" , **__UpperCAmelCase : str , ) -> Dict:
super().__init__(
split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase_= load_from_cache_file
UpperCAmelCase_= file_format
UpperCAmelCase_= Spark(
df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
UpperCAmelCase_= None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 277 | 0 |
from __future__ import annotations
def _lowercase ( UpperCamelCase_ ) -> list[int]: # This function is recursive
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
SCREAMING_SNAKE_CASE__ = array[0]
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = []
while not is_found and i < array_length:
if array[i] < pivot:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = [element for element in array[i:] if element >= array[i]]
SCREAMING_SNAKE_CASE__ = longest_subsequence(UpperCamelCase_ )
if len(UpperCamelCase_ ) > len(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = temp_array
else:
i += 1
SCREAMING_SNAKE_CASE__ = [element for element in array[1:] if element >= pivot]
SCREAMING_SNAKE_CASE__ = [pivot, *longest_subsequence(UpperCamelCase_ )]
if len(UpperCamelCase_ ) > len(UpperCamelCase_ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
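    # A hedged usage sketch of the recursive helper defined above (calling it
    # by its name in this snippet); it returns one longest non-decreasing
    # subsequence, exploring suffixes by brute force.
    print(_lowercase([3, 10, 2, 11]))  # -> [3, 10, 11]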
| 176 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__snake_case = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ = hf_model.feature_extractor
SCREAMING_SNAKE_CASE__ = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE__ = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE__ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ = name.split(UpperCamelCase_ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE__ = mapped_key.replace('*' , UpperCamelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE__ = 'bias'
elif "weight" in name:
SCREAMING_SNAKE_CASE__ = 'weight'
else:
SCREAMING_SNAKE_CASE__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
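# A standalone sketch (the fairseq-style name below is assumed) of the
# wildcard handling above: when the HF-side key contains "*", the layer index
# parsed out of the original parameter name is substituted in.
_name = "encoder.layers.3.self_attn.k_proj.weight"
_key, _mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
_layer_index = _name.split(_key)[0].split(".")[-2]  # -> "3"
print(_mapped_key.replace("*", _layer_index))  # -> encoder.layers.3.attention.k_proj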
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('.' )
SCREAMING_SNAKE_CASE__ = int(items[0] )
SCREAMING_SNAKE_CASE__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
SCREAMING_SNAKE_CASE__ = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = full_name.split('adaptor.' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('.' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE__ = int(items[1] )
else:
SCREAMING_SNAKE_CASE__ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
            SCREAMING_SNAKE_CASE__ = value
            logger.info(F'Adapter proj layer norm weight was initialized from {full_name}.' )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = emb.weight.shape
SCREAMING_SNAKE_CASE__ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = emb.weight.data
return lin_layer
@torch.no_grad()
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = WavaVecaConfig.from_pretrained(
UpperCamelCase_ , add_adapter=UpperCamelCase_ , adapter_stride=UpperCamelCase_ , adapter_kernel_size=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , output_hidden_size=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = MBartConfig.from_pretrained(UpperCamelCase_ )
# load model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
SCREAMING_SNAKE_CASE__ = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , use_auth_token=UpperCamelCase_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE__ = WavaVecaModel(UpperCamelCase_ )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
# load decoder weights
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
SCREAMING_SNAKE_CASE__ = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = MBartaaTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE__ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = 'mbart50'
SCREAMING_SNAKE_CASE__ = 'wav2vec2'
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = 250004
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=10_24, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_00_04, type=int, help="""`decoder_start_token_id` of model config""")
__snake_case = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 176 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Union[str, Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def _UpperCAmelCase ( _UpperCamelCase : str ) -> str:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(_UpperCamelCase )
if not _is_chinese_char(_UpperCamelCase ):
return 0
return 1
def _UpperCAmelCase ( _UpperCamelCase : List[str] ) -> Any:
A_ = set()
for token in tokens:
A_ = len(_UpperCamelCase ) > 1 and is_chinese(_UpperCamelCase )
if chinese_word:
word_set.add(_UpperCamelCase )
A_ = list(_UpperCamelCase )
return word_list
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : set() ) -> Tuple:
if not chinese_word_set:
return bert_tokens
A_ = max([len(_UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ ,A_ = 0, len(_UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, _UpperCamelCase )
for i in range(_UpperCamelCase, 1, -1 ):
A_ = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = '''##''' + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
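# A condensed, self-contained restatement (toy tokens assumed) of the "##"
# marking above: characters that continue an LTP-segmented Chinese word get
# the WordPiece "##" prefix so whole-word masking treats the word as a unit.
_bert_word = ["中", "国", "人"]
_chinese_words = {"中国"}
_max_len = max(len(w) for w in _chinese_words)
_start = 0
while _start < len(_bert_word):
    _matched = False
    for _i in range(min(len(_bert_word) - _start, _max_len), 1, -1):
        if "".join(_bert_word[_start : _start + _i]) in _chinese_words:
            for _j in range(_start + 1, _start + _i):
                _bert_word[_j] = "##" + _bert_word[_j]
            _start += _i
            _matched = True
            break
    if not _matched:
        _start += 1
print(_bert_word)  # -> ['中', '##国', '人']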
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : LTP, _UpperCamelCase : BertTokenizer ) -> Dict:
A_ = []
for i in range(0, len(_UpperCamelCase ), 1_00 ):
A_ = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
A_ = [get_chinese_word(_UpperCamelCase ) for r in res]
ltp_res.extend(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
A_ = []
for i in range(0, len(_UpperCamelCase ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_UpperCamelCase, truncation=_UpperCamelCase, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(_UpperCamelCase, _UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(_UpperCamelCase )
input_tokens.append(_UpperCamelCase )
A_ = add_sub_symbol(_UpperCamelCase, _UpperCamelCase )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(_UpperCamelCase ) == 1 and _is_chinese_char(ord(_UpperCamelCase ) ):
ref_id.append(_UpperCamelCase )
ref_ids.append(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
return ref_ids
def _UpperCAmelCase ( _UpperCamelCase : Tuple ) -> Any:
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(_UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
A_ = [json.dumps(_UpperCamelCase ) + '''\n''' for ref in ref_ids]
f.writelines(_UpperCamelCase )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__snake_case : Any = parser.parse_args()
main(args)
| 366 | '''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 106 |
"""simple docstring"""
from __future__ import annotations
class snake_case :
'''simple docstring'''
def __init__( self : int, _lowerCamelCase : List[Any]=None ):
'''simple docstring'''
__A = data
__A = None
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
__A = []
__A = self
while temp:
string_rep.append(f'{temp.data}' )
__A = temp.next
return "->".join(_lowerCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if not elements_list:
raise Exception('''The Elements List is empty''' )
__A = __A = Node(elements_list[0] )
for i in range(1 , len(__UpperCamelCase ) ):
__A = Node(elements_list[i] )
__A = current.next
return head
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if head_node is not None and isinstance(__UpperCamelCase , __UpperCamelCase ):
print_reverse(head_node.next )
print(head_node.data )
def lowerCAmelCase ( ):
"""simple docstring"""
from doctest import testmod
testmod()
__A = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print('''Linked List:''' )
print(__UpperCamelCase )
print('''Elements in Reverse:''' )
print_reverse(__UpperCamelCase )
if __name__ == "__main__":
main()
| 266 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F'{test_file} instead.' )
__a =components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
__a =components[:-1] + [test_fn.replace('.py' , '' )]
__a ='.'.join(_snake_case )
return test_module_path
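# A hedged example (the file path below is hypothetical) of the translation
# performed above: a test file path becomes a dotted module path.
import os

_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
_components = _test_file.split(os.path.sep)
_components = _components[:-1] + [_components[-1].replace(".py", "")]
print(".".join(_components))  # -> tests.models.bert.test_modeling_bert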
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =get_module_path(_snake_case )
__a =importlib.import_module(_snake_case )
return test_module
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =[]
__a =get_test_module(_snake_case )
for attr in dir(_snake_case ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(_snake_case , _snake_case ) )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =[]
__a =get_test_module(_snake_case )
for attr in dir(_snake_case ):
__a =getattr(_snake_case , _snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__a =getattr(_snake_case , 'all_model_classes' , [] )
if len(_snake_case ) > 0:
test_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
__a =get_test_classes(_snake_case )
__a =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =test_class()
if hasattr(_snake_case , 'setUp' ):
test.setUp()
__a =None
if hasattr(_snake_case , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__a =test.model_tester.__class__
return model_tester
def UpperCamelCase_( _snake_case : int , _snake_case : Tuple ):
"""simple docstring"""
__a =get_test_classes(_snake_case )
__a =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : List[str] , _snake_case : Optional[int] ):
"""simple docstring"""
__a =get_test_classes_for_model(_snake_case , _snake_case )
__a =[]
for test_class in test_classes:
__a =get_model_tester_from_test_class(_snake_case )
if tester_class is not None:
tester_classes.append(_snake_case )
# sort with class names
return sorted(_snake_case , key=lambda _snake_case : x.__name__ )
def UpperCamelCase_( _snake_case : Tuple ):
"""simple docstring"""
__a =get_test_classes(_snake_case )
__a ={test_class: get_model_tester_from_test_class(_snake_case ) for test_class in test_classes}
return test_tester_mapping
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =get_model_classes(_snake_case )
__a ={
model_class: get_test_classes_for_model(_snake_case , _snake_case ) for model_class in model_classes
}
return model_test_mapping
def UpperCamelCase_( _snake_case : Dict ):
"""simple docstring"""
__a =get_model_classes(_snake_case )
__a ={
model_class: get_tester_classes_for_model(_snake_case , _snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def UpperCamelCase_( _snake_case : Optional[int] ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
return o
elif isinstance(_snake_case , _snake_case ):
return o.__name__
elif isinstance(_snake_case , (list, tuple) ):
return [to_json(_snake_case ) for x in o]
elif isinstance(_snake_case , _snake_case ):
return {to_json(_snake_case ): to_json(_snake_case ) for k, v in o.items()}
else:
return o
| 308 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase_( _snake_case : Image ):
"""simple docstring"""
__a =hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
import datasets
__a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__a =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __snake_case , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='Intel/dpt-large'
__a =pipeline('depth-estimation' , model=__snake_case )
__a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__a =hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
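# A hedged usage sketch of the pipeline exercised by the slow test above,
# kept commented out because it downloads the Intel/dpt-large weights; the
# output keys match the assertions in the tests ("depth" is a PIL image,
# "predicted_depth" a torch.Tensor):
#
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs["depth"].save("depth.png")
# print(outputs["predicted_depth"].shape)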
| 308 | 1 |