| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) a list of variable-length sequences to a fixed length."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            # Left padding: place the values at the end of the row.
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(trimmed) :, :2] = trimmed
            else:
                out_tensor[i, sequence_length - len(trimmed) :] = trimmed
    return out_tensor.tolist()
def is_punctuation(char):
    """Check whether `char` is a punctuation character."""
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation ("^", "$", "`", ...),
    # since those characters are not covered by the Unicode "P" categories.
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the labels,
    entity-level NER tags and original entity spans.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here if we have labels,
            # since they are not all of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
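A minimal usage sketch of the `padding_tensor` helper above; the toy sequences and the printed results are illustrative, not part of the collator code.

sequences = [[1, 2, 3], [4, 5]]
print(padding_tensor(sequences, -1, padding_side="right", sequence_length=4))
# [[1, 2, 3, -1], [4, 5, -1, -1]]
print(padding_tensor(sequences, -1, padding_side="left", sequence_length=4))
# [[-1, 1, 2, 3], [-1, -1, 4, 5]]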
'''simple docstring'''

import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        # The Karras sigma schedule changes the noise levels, so the reference
        # values differ from the default-sigma loops above.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
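Outside the test harness, the denoising loop the tests above exercise looks roughly like the sketch below; a zero tensor stands in for a trained UNet's noise prediction, so the output is meaningless, but the scheduler calls are the real API.

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma  # start from scaled noise

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample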
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
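The `get_dummy_inputs` helper above encodes a device-aware seeding pattern that is worth isolating; `make_generator` is a hypothetical name used only in this sketch.

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # Older torch releases cannot create device-local generators on MPS,
    # so fall back to seeding the global CPU generator there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

noise = torch.randn(1, 4, 8, 8, generator=make_generator("cpu"))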
'''simple docstring'''

def binary_and(a: int, b: int) -> str:
    """
    Take in two integers, return a "0b"-prefixed string of their binary AND.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
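Two quick checks of `binary_and`, chosen so the results are easy to verify against Python's built-in `&` operator:

print(binary_and(25, 32))  # 0b000000  (25 & 32 == 0)
print(binary_and(37, 50))  # 0b100000  (37 & 50 == 32)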
'''simple docstring'''

import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
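A hedged inference sketch matching the integration test above; the checkpoint name comes from the test, while the image path is illustrative.

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

image = Image.open("cat.png")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])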
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
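The estimators can be driven directly; the iteration count below is an arbitrary choice. The factor of 4 in `pi_estimator` comes from the unit circle's area (pi) divided by the area of the enclosing [-1, 1] x [-1, 1] square (4), so 4 times the hit proportion estimates pi.

pi_estimator(100_000)
area_under_line_estimator_check(100_000)
pi_estimator_using_area_under_curve(100_000)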
'''simple docstring'''

import os
import sys

import transformers

# Silence TensorFlow's C++ logging before any TF import (3 = errors only).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
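Not transformers' actual implementation, but a minimal sketch of the lazy-import idea behind `_LazyModule`, written with module-level `__getattr__` (PEP 562); the mapping reuses one entry from `_import_structure` purely for illustration.

import importlib

_import_structure = {"configuration_layoutlmv2": ["LayoutLMv2Config"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the submodule on first attribute access instead of at package import time.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")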
'''simple docstring'''

import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it and separating repeated letters with X's.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
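A round trip through the cipher; the key and message follow the classic Playfair example, and `prepare_input` splits the repeated E in "tree" with an X before encoding.

ciphertext = encode("Hide the gold in the tree stump", "playfair example")
print(ciphertext)
print(decode(ciphertext, "playfair example"))  # HIDETHEGOLDINTHETREXESTUMP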
'''simple docstring'''

def fizz_buzz(number: int, iterations: int) -> str:
    """
    Play FizzBuzz: starting at `number`, append "Fizz" for multiples of 3,
    "Buzz" for multiples of 5 ("FizzBuzz" for both), otherwise the number
    itself, up to and including `iterations`.
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
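A quick check of the first fifteen numbers (the returned string carries a trailing space per iteration):

print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz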
'''simple docstring'''

from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'

_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'

_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
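Independent of jiwer, the CER formula above reduces to character-level edit distance divided by reference length; a minimal sketch (`simple_cer` is a name invented here):

def simple_cer(reference: str, prediction: str) -> float:
    # Character-level Levenshtein distance via a rolling 1-D DP row.
    m, n = len(reference), len(prediction)
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, n + 1):
            cur = dp[j]
            dp[j] = min(dp[j] + 1, dp[j - 1] + 1, prev + (reference[i - 1] != prediction[j - 1]))
            prev = cur
    return dp[n] / m

assert simple_cer("hello", "helo") == 0.2  # one deletion over five reference characters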
'''simple docstring'''

import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
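# Shape sketch for DataCollatorForMultipleChoice (the tokenizer and texts below are
# illustrative, not part of this script): four choices per example are padded as one
# flat batch, then viewed back as (batch_size, num_choices, seq_len).
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     features = [
#         {
#             "label": 0,
#             "input_ids": [tokenizer(c)["input_ids"] for c in ex],
#             "attention_mask": [tokenizer(c)["attention_mask"] for c in ex],
#         }
#         for ex in [["He went home", "He went away"], ["She ate", "She slept"]]
#     ]
#     batch = DataCollatorForMultipleChoice(tokenizer=tokenizer)(features)
#     print(batch["input_ids"].shape)  # torch.Size([2, 2, max_len]): batch x choices x tokens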
def __UpperCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", a_, a_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_UpperCAmelCase : Union[str, Any] = {}
if data_args.train_file is not None:
_UpperCAmelCase : str = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase : Optional[Any] = data_args.validation_file
_UpperCAmelCase : Dict = data_args.train_file.split("." )[-1]
_UpperCAmelCase : Optional[int] = load_dataset(
a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
_UpperCAmelCase : Dict = load_dataset(
"swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )]
_UpperCAmelCase : List[Any] = "sent1"
_UpperCAmelCase : Optional[int] = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
            max_seq_length = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v ), 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : int = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples )
_UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples )
_UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_UpperCAmelCase : Optional[int] = eval_dataset.map(
a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset ) )
        trainer.log_metrics("train", metrics )
        trainer.save_metrics("train", metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset ) )
        trainer.log_metrics("eval", metrics )
        trainer.save_metrics("eval", metrics )
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index: int ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 17 | 0 |
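# --- Illustrative sketch (added; not part of the original script): the
# multiple-choice preprocessing above flattens each example into 4
# (context, ending) pairs before tokenization, then regroups the token ids
# into chunks of 4 so the model sees tensors of shape (batch, 4, seq_len).
# A minimal, tokenizer-free demonstration of that flatten/un-flatten step:
from itertools import chain as _chain

def _swag_flatten_unflatten_demo():
    contexts = [["A man sits."] * 4, ["A dog runs."] * 4]
    endings = [
        ["He reads.", "He sleeps.", "He eats.", "He sings."],
        ["It barks.", "It jumps.", "It swims.", "It digs."],
    ]
    # flatten: 2 examples x 4 endings -> 8 (context, ending) pairs
    flat_pairs = [f"{c} {e}" for c, e in zip(_chain(*contexts), _chain(*endings))]
    assert len(flat_pairs) == 8
    # un-flatten: every 4 consecutive items belong to one example again
    regrouped = [flat_pairs[i : i + 4] for i in range(0, len(flat_pairs), 4)]
    assert len(regrouped) == 2 and all(len(group) == 4 for group in regrouped)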
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error (MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
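# --- Illustrative sketch (added): the metric below is a thin wrapper around
# sklearn's `mean_squared_error`, i.e. mean((prediction - reference) ** 2).
# Reproducing the docstring example by hand:
def _mse_by_hand_demo():
    predictions = [2.5, 0.0, 2.0, 8.0]
    references = [3.0, -0.5, 2.0, 7.0]
    mse = sum((p - r) ** 2 for p, r in zip(predictions, references)) / len(predictions)
    assert mse == 0.375  # matches the squared=True example in the docstring
    rmse = mse ** 0.5  # the squared=False (RMSE) case
    assert abs(rmse - 0.6123724356957945) < 1e-9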
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self : int ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types( self : List[Any] ) -> Dict:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse} | 359 | '''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
"""simple docstring"""
    def __init__( self : Any , model ) -> None:
        """simple docstring"""
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str ):
    # load longformer model from model identifier
    model = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(model )
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
) | 17 | 0 |
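# --- Illustrative sketch (added): the conversion above works because
# `load_state_dict` matches parameters by name. Copying weights between two
# identically-shaped modules reduces to:
def _state_dict_transfer_demo():
    import torch
    from torch import nn

    src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
    dst.load_state_dict(src.state_dict())
    assert torch.equal(src.weight, dst.weight) and torch.equal(src.bias, dst.bias)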
'''simple docstring'''
def __UpperCAmelCase ( a_: int ):
    # a number is perfect when it equals the sum of its proper divisors
    return sum(i for i in range(1, a_ // 2 + 1 ) if a_ % i == 0 ) == a_
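# Hedged sanity checks (added for illustration):
# 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect, 12 is abundant.
assert __UpperCAmelCase(6) and __UpperCAmelCase(28) and __UpperCAmelCase(496)
assert not __UpperCAmelCase(12)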
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
__a = int(input('Enter number: ').strip())
    print(f'{__a} is {"" if __UpperCAmelCase(__a) else "not "}a Perfect Number.') | 360 | '''simple docstring'''
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class _PatchedModuleObj :
"""simple docstring"""
    def __init__(self, module, attrs=None):
        """simple docstring"""
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    """simple docstring"""
    _active_patches = []
    def __init__(self, obj, target: str, new, attrs=None):
        """simple docstring"""
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        """simple docstring"""
        *submodules, target_attr = self.target.split("." )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__(self, *exc_info):
        """simple docstring"""
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start(self):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
    def stop(self):
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 17 | 0 |
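# --- Illustrative usage sketch (added): patching `os.path.join` as seen from
# a module-like object, mirroring how the `datasets` test-suite uses this
# helper. The fake module below is a stand-in for a module that does
# `import os`.
def _patch_submodule_demo():
    import os
    import types

    fake_module = types.ModuleType("fake_module")
    fake_module.os = os  # simulates `import os` inside the patched module
    mock = "__mocked_join__"
    with patch_submodule(fake_module, "os.path.join", mock):
        assert fake_module.os.path.join is mock
    # on exit the original module object is restored
    assert fake_module.os.path.join is os.path.join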
'''simple docstring'''
import inspect
import unittest
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def test_diffusers_import(self):
        """simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
    def test_backend_registration(self):
        """simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
_UpperCAmelCase : Union[str, Any] = "k-diffusion"
elif backend == "invisible_watermark":
_UpperCAmelCase : Tuple = "invisible-watermark"
assert backend in deps, F"""{backend} is not in the deps table!""" | 361 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    """simple docstring"""
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        """simple docstring"""
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise | 17 | 0 |
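# --- Illustrative usage sketch (added): this builder is what backs
# `load_dataset("csv", ...)`; extra keyword arguments flow into `CsvConfig`
# and reach `pd.read_csv` through `pd_read_csv_kwargs`.
def _csv_builder_demo():
    import os
    import tempfile
    from datasets import load_dataset

    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "data.csv")
        with open(path, "w") as f:
            f.write("col_1,col_2\na,1\nb,2\n")
        ds = load_dataset("csv", data_files=path, split="train")
        assert ds.column_names == ["col_1", "col_2"]
        assert ds.num_rows == 2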
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 362 | '''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( a_: list[int] ):
    # maximum sum of non-adjacent elements, O(n) time and O(1) space
    if not a_:
        return 0
    max_including = a_[0]
    max_excluding = 0
    for num in a_[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
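# Hedged trace (added for illustration): for [3, 2, 7, 10] the pair
# (best including current, best excluding current) evolves as
# (3, 0) -> (2, 3) -> (10, 3) -> (13, 10), so the answer is 13 (= 3 + 10).
def _max_non_adjacent_sum_demo():
    assert __UpperCAmelCase([3, 2, 7, 10]) == 13
    assert __UpperCAmelCase([3, 2, 5, 10, 7]) == 15  # 3 + 5 + 7
    assert __UpperCAmelCase([]) == 0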
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
'''simple docstring'''
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("env" )
    else:
        parser = argparse.ArgumentParser("Accelerate env command" )
    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script." )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
        "PyTorch XPU available": str(pt_xpu_available ),
        "PyTorch NPU available": str(pt_npu_available ),
        "System RAM": f"""{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    print("\nCopy-and-paste the text below in your GitHub issue\n" )
    print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
    accelerate_config_str = (
        "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config, dict )
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info["`Accelerate` configs"] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
    raise SystemExit(main()) | 363 | '''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder", "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder", "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(idx )-1}""" )
        if "norm" in key:
            key = key.replace("norm", "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(idx )-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"""block{idx}""", f"""block.{int(idx )-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn", "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1", "dense1" )
        if "fc2" in key:
            key = key.replace("fc2", "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse" )
            key = key.replace("linear_fuse.bn", "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(idx )-1}""" )
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth", "head.head" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict, config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(f"""Unknown model name: {model_name}""" )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__a = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name) | 17 | 0 |
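# --- Illustrative sketch (added): `read_in_k_v` above splits a fused
# key/value projection of shape (2 * hidden, hidden) into separate key and
# value matrices by slicing along the first axis:
def _kv_split_demo():
    import torch

    hidden = 4
    kv_weight = torch.arange(2 * hidden * hidden, dtype=torch.float32).reshape(2 * hidden, hidden)
    k_weight = kv_weight[:hidden, :]  # first `hidden` rows -> key projection
    v_weight = kv_weight[hidden:, :]  # remaining rows -> value projection
    assert k_weight.shape == v_weight.shape == (hidden, hidden)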
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
        'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileViTForImageClassification',
        'MobileViTForSemanticSegmentation',
        'MobileViTModel',
        'MobileViTPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
        'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileViTForImageClassification',
        'TFMobileViTForSemanticSegmentation',
        'TFMobileViTModel',
        'TFMobileViTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 364 | '''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
    n = 10
    features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
    dataset = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(a_ ) ),
        }, features=features, )
return dataset
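# --- Illustrative note (added): in the upstream conftest this fixture is
# registered under the name `dataset`; a test consumes it simply by
# declaring that name as an argument, e.g.:
#
#     def test_dataset_has_ten_rows(dataset):
#         assert dataset.num_rows == 10
#         assert dataset[0]["tokens"] == ["foo"] * 5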
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( dataset, tmp_path_factory ):
    filename = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt"
_UpperCAmelCase : Tuple = FILE_CONTENT
with open(a_, "w" ) as f:
f.write(a_ )
return filename
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
    import bz2
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" )
with bza.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
import gzip
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_UpperCAmelCase : Any = bytes(a_, "utf-8" )
with gzip.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_UpperCAmelCase : str = bytes(a_, "utf-8" )
with lza.frame.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Any ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(a_, "w" ) as archive:
archive.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: List[str] ):
import tarfile
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
import lzma
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_UpperCAmelCase : List[str] = bytes(a_, "utf-8" )
with lzma.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: Tuple ):
import zipfile
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_UpperCAmelCase : int = bytes(a_, "utf-8" )
with zstd.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml"
_UpperCAmelCase : Tuple = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(a_, "w" ) as f:
f.write(a_ )
return filename
DATA = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : str = datasets.Dataset.from_dict(a_ )
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(a_ ) ) as con:
_UpperCAmelCase : List[Any] = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str, a_: str ):
    import bz2
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(a_, "rb" ) as f:
_UpperCAmelCase : Any = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ):
_UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) )
f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ):
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_UpperCAmelCase : Dict = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(a_, "wb" ) as f:
_UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ )
_UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ )
writer.write_table(a_ )
writer.close()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : str = {"data": DATA}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_312:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ):
import gzip
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(a_, "rb" ) as orig_file:
with gzip.open(a_, "wb" ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ):
import gzip
_UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(a_, "rb" ) as orig_file:
with gzip.open(a_, "wb" ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ):
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : List[str] = ["0", "1", "2", "3"]
_UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Dict = ["0", "1", "2", "3"]
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = ["0", "1", "2", "3"]
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename("unsupported.ext" ) )
f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(a_, "w", encoding="utf-8" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
return data_dir | 17 | 0 |
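# --- Illustrative note (added): the `data_dir` fixture above exists to check
# that data-file resolution skips hidden files (".test.txt") and hidden
# directories (".subdir"); only "subdir/train.txt" and "subdir/test.txt"
# are expected to be picked up.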
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo( ctypes.Structure ):
        """simple docstring"""
        # note: `_fields_` is the attribute ctypes expects for the struct layout
        _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25l" )
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25h" )
        sys.stdout.flush()
@contextmanager
def __UpperCAmelCase ( ):
try:
hide_cursor()
yield
finally:
show_cursor() | 365 | '''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = BarthezTokenizer
UpperCamelCase_ : List[Any] = BarthezTokenizerFast
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : Optional[int] = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1_0_1_1_2_2 )
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
    def test_prepare_batch(self) -> None:
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self) -> None:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self) -> None:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        expected_encoding = _UpperCAmelCase  # bind the dict above to a readable name
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, ) | 17 | 0 |
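# Usage sketch (assumption: `transformers` and `sentencepiece` are installed): the
# slow/fast pair exercised above can also be driven directly, e.g.
#     tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
#     tok("Le transformeur est un modèle d'apprentissage profond.")["input_ids"]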
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
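# Illustrative sketch (assumption: `transformers` with vision extras installed): the
# dict above maps 1:1 onto the processor constructor, so the tests below can do
#     processor = MobileViTImageProcessor(**tester.prepare_image_processor_dict())
# i.e. resize the short edge to 20 px, center-crop to 18x18, and flip RGB->BGR.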
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = MobileViTImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self) -> None:  # method name assumed; body is intentionally empty
"""simple docstring"""
pass
    def test_call_pil(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 366 | '''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        """simple docstring"""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
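# Background sketch (Pix2Struct-style patch flattening): an image is cut into
# patch_size["height"] x patch_size["width"] tiles, each tile is flattened to
# h * w * num_channels values, and 2 extra slots carry the tile's (row, col)
# position -- hence the `... * num_channels + 2` hidden-dim checks in the tests below.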
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self) -> None:
        """simple docstring"""
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches
            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input: the alpha channel is dropped, hence num_channels - 1
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) | 17 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    """simple docstring"""
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """simple docstring"""
        pass
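

# Minimal subclass sketch (assumption: concrete readers in `datasets.io` follow this
# shape): a reader stores its builder in `__init__` and materializes it in `read()`:
#     class JsonDatasetReader(AbstractDatasetReader):
#         def read(self):
#             ...  # build and return a Dataset or IterableDataset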
class AbstractDatasetInputStream(ABC):
    """simple docstring"""
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """simple docstring"""
pass | 367 | '''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 17 | 0 |
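# Usage sketch (assumption: `transformers` is installed): the config can be built
# directly; `feature_size` is derived from the inputs rather than passed in.
#     config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
#     assert config.d_model == 64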
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term that has `n` digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        digit_count = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            digit_count += 1
        if digit_count == n:
            break
    return index
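

# Quick check sketch: the first Fibonacci term with 3 digits is the 12th term (144).
#     >>> solution(3)
#     12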
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 368 | '''simple docstring'''
import base64


# NOTE: the digits in the original helper names were mangled in this dump ("baseaa");
# base64 is assumed here, but the same shape works for b16/b32/b85 variants.
def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
print(decoded) | 17 | 0 |
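# Round-trip sketch (stdlib only): decoding an encoded string recovers the input.
#     >>> base64_decode(base64_encode("Hello World!"))
#     'Hello World!'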
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):  # method name assumed; not referenced elsewhere
"""simple docstring"""
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
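

# Flow sketch (standard transformers test harness): the mixin-based test case below
# asks the tester for a tiny random spec, then hands it to one check per task head:
#     config_and_inputs = tester.prepare_config_and_inputs()
#     tester.create_and_check_mpnet_model(*config_and_inputs)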
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    # NOTE: the flag names below are assumptions; only their values survive in the source.
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self) -> None:
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_mpnet_model(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)
    def test_for_sequence_classification(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)
    def test_for_token_classification(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head(self) -> None:
        """simple docstring"""
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 369 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """simple docstring"""
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):  # method name assumed; not referenced elsewhere
"""simple docstring"""
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config(self):
        """simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class A__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # NOTE: the three flag names below are assumptions; only their False values survive.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self) -> None:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self) -> None:
        """simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
    def test_resize_token_embeddings(self) -> None:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_xglm(self, verify_outputs=True) -> None:
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self) -> None:
        """simple docstring"""
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
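    # Determinism note (sketch): `seed=[7, 0]` drives TF's stateless sampler, so the
    # decoded string above is only reproducible on CPU with a pinned TF version.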
@slow
    def test_batch_generation(self) -> None:
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) | 17 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def test_set_level(self) -> None:
        """simple docstring"""
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self) -> None:
        """simple docstring"""
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")
        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override(self) -> None:
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override(self) -> None:
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings(self) -> None:
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
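

# Usage sketch (assumption: `transformers` installed): the same toggles silence tqdm
# bars from user code, e.g.
#     from transformers.utils.logging import disable_progress_bar
#     disable_progress_bar()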
| 370 | '''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files", [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
], )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md", "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info", [
DatasetInfo(),
DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ),
], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict", [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1_337 ),
} ),
], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(a_, "README.md" ) ) | 17 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase : Dict = 3
_UpperCAmelCase : Optional[int] = 2_5_0
_UpperCAmelCase : Union[str, Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = self._get_tensors(5 )
_UpperCAmelCase : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Union[str, Any] = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : str = MaxLengthCriteria(max_length=1_0 )
_UpperCAmelCase : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_UpperCAmelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : int = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
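        # MaxNewTokensCriteria records max_length = start_length + max_new_tokens (10 here),
        # and StoppingCriteriaList surfaces that value, as the assertion below checks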
_UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self._get_tensors(5 )
_UpperCAmelCase : Tuple = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
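        # backdating the start timestamp by 0.2s means max_time=0.1 has already elapsed,
        # so this criteria should report True immediately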
_UpperCAmelCase : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
_UpperCAmelCase : Union[str, Any] = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 ) | 371 | '''simple docstring'''
from math import factorial
def solution ( a_: int = 100 ) -> int:
    return sum(map(int, str(factorial(a_ ) ) ) )
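# e.g. solution(10) == 27, since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27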
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip()))) | 17 | 0 |
'''simple docstring'''
import math
def prime_sieve ( n : int ) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1 ), 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
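# Note: the inner loop above also flags even multiples; that is harmless because the
# final pass only reads odd indices and 2 is appended explicitly.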
def solution ( limit : int = 99_99_66_66_33_33 ) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_00
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
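# For each pair of consecutive primes p < q, the loop above adds the multiples of p and
# the multiples of q that fall inside the window (p**2, q**2) and below the limit, then
# walks the multiples of p * q to subtract values that both passes counted.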
if __name__ == "__main__":
print(solution())
| 18 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader ( path : str ):
    with open(path, '''rb''' ) as f:
        im = Image.open(f )
    return im.convert('''RGB''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
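# collate examples into a batch: the torchvision transforms below return one tensor per
# image, so we stack them and gather the integer labels alongside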
def collate_fn ( examples ):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main ( ) -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''', model_args, data_args )
# Setup logging
logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        handlers=[logging.StreamHandler(sys.stdout )],
    )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task='''image-classification''',
            use_auth_token=True if model_args.use_auth_token else None,
        )
else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['''train'''] = os.path.join(data_args.train_dir, '''**''' )
        if data_args.validation_dir is not None:
            data_files['''validation'''] = os.path.join(data_args.validation_dir, '''**''' )
        dataset = load_dataset(
            '''imagefolder''',
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task='''image-classification''',
        )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split )
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    labelaid, idalabel = {}, {}
    for i, label in enumerate(labels ):
        labelaid[label] = str(i )
        idalabel[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : Optional[Any] ):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels ),
        labelaid=labelaid,
        idalabel=idalabel,
        finetuning_task='''image-classification''',
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path ),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['''shortest_edge''']
    else:
        size = (image_processor.size['''height'''], image_processor.size['''width'''])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
            ToTensor(),
            normalize,
        ] )
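    # random crop + horizontal flip augment only the training set; evaluation uses a
    # deterministic resize + center crop so metrics stay comparable across runs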
    def train_transforms(example_batch : Dict ):
        example_batch['''pixel_values'''] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(example_batch : Any ):
        example_batch['''pixel_values'''] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
# Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset['''train'''] if training_args.do_train else None,
        eval_dataset=dataset['''validation'''] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''', metrics )
        trainer.save_metrics('''eval''', metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = inspect.getfile(accelerate.test_utils )
A_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A_ = test_metrics
@require_cpu
def __A ( self ) -> Optional[int]:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __A ( self ) -> Optional[int]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __A ( self ) -> Union[str, Any]:
self.test_metrics.main()
@require_multi_gpu
def __A ( self ) -> Any:
print(F'''Found {torch.cuda.device_count()} devices.''' )
A_ = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
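        # launch one process per visible GPU; omp_num_threads=1 keeps each worker from
        # oversubscribing CPU threads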
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
| 18 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Dict:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __A ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __A ( self ) -> str:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
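        # compare every leaf of the pushed vs. re-downloaded parameter trees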
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def __A ( self ) -> List[str]:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal ( model_a, model_b ) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> List[Any]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__snake_case : Tuple = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
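# Lazy-import pattern: names are registered in `_import_structure` per optional backend,
# and the `_LazyModule` at the bottom defers the real imports until first attribute access.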
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = ['CLIPFeatureExtractor']
__snake_case : str = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 18 | '''simple docstring'''
def count_divisors ( n : int ) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
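# If n = p1**a1 * ... * pk**ak, then n has (a1 + 1) * ... * (ak + 1) divisors;
# e.g. 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.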
def solution ( ) -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 18 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ) -> Dict:
A_ = 1
A_ = 3
A_ = (32, 32)
A_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
@property
def __A ( self ) -> Tuple:
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
return model
@property
def __A ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __A ( self ) -> str:
torch.manual_seed(0 )
A_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_SCREAMING_SNAKE_CASE )
@property
def __A ( self ) -> str:
def extract(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self ) -> str:
A_ = torch.ones([0] )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
self.pixel_values.to(_SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def __A ( self ) -> Tuple:
A_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ = self.dummy_cond_unet
A_ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
A_ = self.dummy_vae
A_ = self.dummy_text_encoder
A_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
A_ = 77
A_ = self.dummy_image.to(_SCREAMING_SNAKE_CASE )
A_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A_ = AltDiffusionImgaImgPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
A_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_SCREAMING_SNAKE_CASE )
A_ = alt_pipe.to(_SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = '''A painting of a squirrel eating a burger'''
A_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ = alt_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_SCREAMING_SNAKE_CASE , )
A_ = output.images
A_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ = alt_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self ) -> Union[str, Any]:
A_ = self.dummy_cond_unet
A_ = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
A_ = self.dummy_vae
A_ = self.dummy_text_encoder
A_ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
A_ = 77
A_ = self.dummy_image.to(_SCREAMING_SNAKE_CASE )
# put models in fp16
A_ = unet.half()
A_ = vae.half()
A_ = bert.half()
# make sure here that pndm scheduler skips prk
A_ = AltDiffusionImgaImgPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
A_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_SCREAMING_SNAKE_CASE )
A_ = alt_pipe.to(_SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = '''A painting of a squirrel eating a burger'''
A_ = torch.manual_seed(0 )
A_ = alt_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , image=_SCREAMING_SNAKE_CASE , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self ) -> str:
A_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
A_ = init_image.resize((760, 504) )
A_ = '''BAAI/AltDiffusion'''
A_ = AltDiffusionImgaImgPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
A_ = '''A fantasy landscape, trending on artstation'''
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ = output.images[0]
A_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
A_ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> str:
A_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ = init_image.resize((768, 512) )
A_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
A_ = '''BAAI/AltDiffusion'''
A_ = AltDiffusionImgaImgPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
A_ = '''A fantasy landscape, trending on artstation'''
A_ = torch.manual_seed(0 )
A_ = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 18 | '''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = backbone_out_indices
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = backbone_featmap_shape
A_ = scope
A_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
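        # with the defaults above (image_size=32, patch_size=16) this gives 4 patches, so seq_length = 5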
A_ = num_patches + 1
def __A ( self ) -> Optional[Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
A_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=_SCREAMING_SNAKE_CASE,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=_SCREAMING_SNAKE_CASE,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = self.num_labels
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = self.num_labels
A_ = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ) -> Optional[int]:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Tuple = False
__lowercase : List[Any] = False
def __A ( self ) -> Tuple:
A_ = DPTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def prepare_img ( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
A_ = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_SCREAMING_SNAKE_CASE )
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE )
A_ = outputs.predicted_depth
# verify the predicted depth
A_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
def binary_recursive ( decimal : int ) -> str:
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal, 2 )
    return binary_recursive(div ) + str(mod )
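# e.g. binary_recursive(5) == '101': divmod(5, 2) -> (2, 1), divmod(2, 2) -> (1, 0),
# and the base case returns '1', so the digits unwind most-significant first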
def main ( number : str ) -> str:
    number = str(number ).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | '''simple docstring'''
import math
def malus_law ( initial_intensity : float, angle : float ) -> float:
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
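# Malus's law: I = I0 * cos(theta)**2, so malus_law(100, 60) == 25.0 since cos(60 deg) = 0.5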
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 18 | 1 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=1 , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = q_groups
A_ = k_groups
A_ = v_groups
A_ = post_attention_groups
A_ = intermediate_groups
A_ = output_groups
def __A ( self ) -> List[Any]:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Tuple:
return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
A_ = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = self.num_labels
A_ = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = self.num_labels
A_ = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
A_ = self.num_choices
A_ = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
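        # tile ids and mask from (batch, seq) to (batch, num_choices, seq); every choice
        # shares the same context in this dummy setup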
A_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> Any:
A_ = self.prepare_config_and_inputs()
((A_) ,(A_) ,(A_) ,(A_) ,(A_) ,(A_)) = config_and_inputs
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__lowercase : List[str] = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : List[str] = False
__lowercase : List[str] = True
__lowercase : Optional[int] = False
def __A ( self ) -> List[Any]:
A_ = SqueezeBertModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37 )
def __A ( self ) -> str:
self.config_tester.run_common_tests()
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Dict:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def __A ( self ) -> Dict:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ) -> List[Any]:
A_ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
A_ = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
A_ = model(_SCREAMING_SNAKE_CASE )[0]
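        # squeezebert-mnli is a three-way NLI classifier, so the logits have shape (1, 3)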
A_ = torch.Size((1, 3) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 18 | '''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
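# If torch/transformers are missing, import dummy objects that raise a helpful
# error at call time instead of failing here at import time.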
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 18 | 1 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | '''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> List[str]:
A_ = torch.load(_UpperCamelCase, map_location='''cpu''' )
if "model" in sd.keys():
        A_ = sd['''model''']
# pop unnecessary weights
A_ = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_UpperCamelCase )
A_ = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A_ = sd.pop(_UpperCamelCase )
A_ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
A_ = sd[key]
            # Split the fused QKV weight into separate Q, K, V projections
A_ = key.replace('''.qkv_proj.''', '''.q_proj.''' )
A_ = key.replace('''.qkv_proj.''', '''.k_proj.''' )
A_ = key.replace('''.qkv_proj.''', '''.v_proj.''' )
A_ = value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused QKV weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A_ ,A_ ,A_ = torch.split(_UpperCamelCase, depth // 3, dim=0 )
A_ = q
A_ = k
A_ = v
del sd[key]
return sd
@torch.no_grad()
def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : Optional[Any], _UpperCamelCase : List[str]=None ) -> Dict:
A_ = load_checkpoint(_UpperCamelCase )
if config is not None:
A_ = OPTConfig.from_pretrained(_UpperCamelCase )
else:
A_ = OPTConfig()
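    # OPT checkpoints were released in fp16, hence the model is built in half precision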
A_ = OPTModel(_UpperCamelCase ).half().eval()
model.load_state_dict(_UpperCamelCase )
    # Save the converted model
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
__snake_case : Optional[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 18 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __UpperCAmelCase ( yaml.SafeLoader ):
'''simple docstring'''
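    # SafeLoader subclass that rejects duplicate keys instead of silently keeping the last one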
def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
A_ = [self.constructed_objects[key_node] for key_node, _ in node.value]
A_ = [tuple(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else key for key in keys]
A_ = Counter(_SCREAMING_SNAKE_CASE )
A_ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
A_ = super().construct_mapping(_SCREAMING_SNAKE_CASE , deep=_SCREAMING_SNAKE_CASE )
self._check_no_duplicates_on_constructed_node(_SCREAMING_SNAKE_CASE )
return mapping
def _UpperCAmelCase ( _UpperCamelCase : str ) -> Tuple[Optional[str], str]:
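    # A README's YAML front matter is delimited by a pair of "---" lines at the very top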
A_ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
A_ = full_content[1:].index('''---''' ) + 1
A_ = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_UpperCamelCase )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __A ( cls , _SCREAMING_SNAKE_CASE ) -> "DatasetMetadata":
with open(_SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as readme_file:
A_ ,A_ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_SCREAMING_SNAKE_CASE )
else:
return cls()
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if path.exists():
with open(_SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as readme_file:
A_ = readme_file.read()
else:
A_ = None
A_ = self._to_readme(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE = None ) -> str:
if readme_content is not None:
A_ ,A_ = _split_yaml_from_readme(_SCREAMING_SNAKE_CASE )
A_ = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
A_ = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def __A ( cls , _SCREAMING_SNAKE_CASE ) -> "DatasetMetadata":
A_ = yaml.load(_SCREAMING_SNAKE_CASE , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
A_ = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE , encoding='''utf-8''' , ).decode('''utf-8''' )
__snake_case : Any = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__snake_case : Optional[int] = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
__snake_case : List[str] = ap.parse_args()
__snake_case : Union[str, Any] = Path(args.readme_filepath)
__snake_case : List[Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | '''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Optional[Any] = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
__snake_case : Tuple = {'allegro/herbert-base-cased': 514}
__snake_case : List[str] = {}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Any = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = HerbertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="</s>" , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
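        # HerBERT format: <s> A </s> for single sequences and <s> A </s> B </s> for pairs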
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
A_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__snake_case : Optional[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__snake_case : List[Any] = typing.Union[np.floataa, int, float] # noqa: UP007
def _UpperCAmelCase ( _UpperCamelCase : Vector, _UpperCamelCase : Vector ) -> VectorOut:
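    # Euclidean (L2) distance computed with numpy: sqrt(sum((v1 - v2) ** 2))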
return np.sqrt(np.sum((np.asarray(_UpperCamelCase ) - np.asarray(_UpperCamelCase )) ** 2 ) )
def _UpperCAmelCase ( _UpperCamelCase : Vector, _UpperCamelCase : Vector ) -> VectorOut:
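    # Pure-Python equivalent, kept as a baseline for the benchmark below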
return sum((va - va) ** 2 for va, va in zip(_UpperCamelCase, _UpperCamelCase ) ) ** (1 / 2)
if __name__ == "__main__":
def _UpperCAmelCase ( ) -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''', number=1_00_00, globals=globals(), ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''', number=1_00_00, globals=globals(), ) )
benchmark()
| 18 | '''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
A_ = subparsers.add_parser('''env''' )
else:
A_ = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''', default=_UpperCamelCase, help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=_UpperCamelCase )
return parser
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Dict:
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = is_xpu_available()
A_ = is_npu_available()
A_ = '''Not found'''
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
A_ = load_config_from_file(args.config_file ).to_dict()
A_ = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(_UpperCamelCase ),
'''PyTorch NPU available''': str(_UpperCamelCase ),
'''System RAM''': F'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
A_ = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
A_ = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_UpperCamelCase, _UpperCamelCase )
else F'''\t{accelerate_config}'''
)
print(_UpperCamelCase )
A_ = accelerate_config
return info
def _UpperCAmelCase ( ) -> int:
A_ = env_command_parser()
A_ = parser.parse_args()
env_command(_UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 18 | 1 |
'''simple docstring'''
import random
def _UpperCAmelCase ( _UpperCamelCase : int ) -> bool:
A_ = num - 1
A_ = 0
while s % 2 == 0:
A_ = s // 2
t += 1
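    # 5 random witnesses: a composite survives each round with probability at most 1/4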
for _ in range(5 ):
A_ = random.randrange(2, num - 1 )
A_ = pow(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
if v != 1:
A_ = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ = i + 1
A_ = (v**2) % num
return True
def _UpperCAmelCase ( _UpperCamelCase : int ) -> bool:
if num < 2:
return False
    A_ = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
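    # Cheap trial division against small primes filters out most composites before Miller-Rabin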
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(_UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : int = 10_24 ) -> int:
while True:
A_ = random.randrange(2 ** (keysize - 1), 2 ** (keysize) )
if is_prime_low_num(_UpperCamelCase ):
return num
if __name__ == "__main__":
__snake_case : Optional[int] = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 18 | '''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = mask_ratio
A_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __A ( self ) -> Union[str, Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
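        # The pre-training head reconstructs pixels: one row of logits per patch, patch_size**2 * channels values each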
A_ = (self.image_size // self.patch_size) ** 2
A_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ = 1
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self ) -> int:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowercase : List[Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__lowercase : Union[str, Any] = False
__lowercase : List[Any] = False
__lowercase : List[str] = False
__lowercase : List[str] = False
def __A ( self ) -> Any:
A_ = ViTMAEModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __A ( self ) -> int:
pass
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Union[str, Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
# make masks reproducible
np.random.seed(2 )
A_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
A_ = outputs[0].cpu().numpy()
A_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
A_ = after_outputs[0].cpu().numpy()
A_ = 0
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Dict:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __A ( self ) -> str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Union[str, Any]:
pass
@slow
def __A ( self ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Dict:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_SCREAMING_SNAKE_CASE )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ = ViTMAEConfig()
A_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
A_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
A_ = start
A_ = end
A_ = val
A_ = (start + end) // 2
A_ = left
A_ = right
def __repr__( self ) -> Optional[Any]:
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = collection
A_ = function
if self.collection:
A_ = self._build_tree(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
self._update_tree(self.root , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return self._query_range(self.root , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if start == end:
return SegmentTreeNode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.collection[start] )
A_ = (start + end) // 2
A_ = self._build_tree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = self._build_tree(mid + 1 , _SCREAMING_SNAKE_CASE )
return SegmentTreeNode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.fn(left.val , right.val ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
if node.start == i and node.end == i:
A_ = val
return
if i <= node.mid:
self._update_tree(node.left , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
self._update_tree(node.right , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = self.fn(node.left.val , node.right.val )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , _SCREAMING_SNAKE_CASE , node.mid ) , self._query_range(node.right , node.mid + 1 , _SCREAMING_SNAKE_CASE ) , )
else:
# range in right child tree
return self._query_range(node.right , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
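        # Level-order (BFS) traversal of the tree using a FIFO queue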
if self.root is not None:
A_ = Queue()
queue.put(self.root )
while not queue.empty():
A_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
__snake_case : Tuple = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
        print(arr.query_range(3, 4)) # 7 with add, 4 with max, 3 with min
        print(arr.query_range(2, 2)) # 5 with add, max and min
        print(arr.query_range(1, 3)) # 13 with add, 5 with max, 3 with min
print()
| 18 | '''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[Any] = 'xlm-prophetnet'
__lowercase : Optional[int] = ['past_key_values']
__lowercase : int = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 3_0522 , _SCREAMING_SNAKE_CASE = 1024 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 2 , **_SCREAMING_SNAKE_CASE , ) -> int:
A_ = vocab_size
A_ = hidden_size
A_ = encoder_ffn_dim
A_ = num_encoder_layers
A_ = num_encoder_attention_heads
A_ = decoder_ffn_dim
A_ = num_decoder_layers
A_ = num_decoder_attention_heads
A_ = max_position_embeddings
A_ = init_std # Normal(0, this parameter)
A_ = activation_function
# parameters for xlmprophetnet
A_ = ngram
A_ = num_buckets
A_ = relative_max_distance
A_ = disable_ngram_loss
A_ = eps
# 3 Types of Dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = dropout
A_ = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , add_cross_attention=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@property
def __A ( self ) -> int:
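        # total transformer depth is encoder layers + decoder layers; each side is set separately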
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
| 18 | 1 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _UpperCAmelCase ( _UpperCamelCase : Features ) -> Optional[int]:
A_ = np.inf
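    # Use smaller Parquet row groups for heavy features (images, audio, raw binary) so random access stays cheap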
def set_batch_size(_UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_UpperCamelCase, _UpperCamelCase ) and feature.dtype == "binary":
A_ = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_UpperCamelCase, _UpperCamelCase )
return None if batch_size is np.inf else batch_size
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ = path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
A_ = Parquet(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self ) -> str:
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Dict:
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self ) -> int:
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
A_ = self._write(file_obj=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
return written
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
A_ = 0
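        # drop a stray path_or_buf kwarg so it is not forwarded to pyarrow's ParquetWriter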
A_ = parquet_writer_kwargs.pop('''path_or_buf''' , _SCREAMING_SNAKE_CASE )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _SCREAMING_SNAKE_CASE ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
A_ = query_table(
table=self.dataset._data , key=slice(_SCREAMING_SNAKE_CASE , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_SCREAMING_SNAKE_CASE )
written += batch.nbytes
writer.close()
return written
| 18 | '''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : list[float] ) -> float:
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
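    # NPV = sum_i cash_flow_i / (1 + r) ** i, with the first cash flow discounted at t = 0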
A_ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_UpperCamelCase ) )
return round(_UpperCamelCase, ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
__snake_case : Optional[int] = list[tuple[int, int]]
__snake_case : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case : Tuple = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
A_ = pos_x
A_ = pos_y
A_ = (pos_y, pos_x)
A_ = goal_x
A_ = goal_y
A_ = g_cost
A_ = parent
A_ = self.calculate_heuristic()
def __A ( self ) -> float:
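        # Manhattan distance to the goal, a natural heuristic on a 4-connected grid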
A_ = abs(self.pos_x - self.goal_x )
A_ = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ) -> bool:
return self.f_cost < other.f_cost
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
A_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , _SCREAMING_SNAKE_CASE )
A_ = [self.start]
A_ = []
A_ = False
def __A ( self ) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A_ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
A_ = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
A_ = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
A_ = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __A ( self , _SCREAMING_SNAKE_CASE ) -> list[Node]:
A_ = []
for action in delta:
A_ = parent.pos_x + action[1]
A_ = parent.pos_y + action[0]
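            # skip successors that fall outside the grid or land on an obstacle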
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Path:
A_ = node
A_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A_ = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__snake_case : Tuple = (0, 0)
__snake_case : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
__snake_case : Optional[Any] = GreedyBestFirst(init, goal)
__snake_case : str = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case : Optional[Any] = 2
for elem in grid:
print(elem)
| 18 | '''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _UpperCamelCase : int | str ) -> bool:
A_ = str(_UpperCamelCase )
return n == n[::-1]
def _UpperCAmelCase ( _UpperCamelCase : int = 1_00_00_00 ) -> int:
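    # Project Euler 36: sum the numbers below the limit that are palindromic in base 10 and base 2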
A_ = 0
for i in range(1, _UpperCamelCase ):
if is_palindrome(_UpperCamelCase ) and is_palindrome(bin(_UpperCamelCase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 18 | 1 |
'''simple docstring'''
import numpy
# List of input, output pairs
__snake_case : List[str] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__snake_case : List[str] = (((515, 22, 13), 555), ((61, 35, 49), 150))
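# parameter_vector[0] is the bias term; the remaining entries are feature weights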
__snake_case : int = [2, 4, 1, 5]
__snake_case : str = len(train_data)
__snake_case : Union[str, Any] = 0.009
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : List[Any]="train" ) -> Tuple:
return calculate_hypothesis_value(_UpperCamelCase, _UpperCamelCase ) - output(
_UpperCamelCase, _UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> List[Any]:
A_ = 0
for i in range(len(_UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : List[Any] ) -> Dict:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : Optional[Any] ) -> List[Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _UpperCAmelCase ( _UpperCamelCase : Optional[Any], _UpperCamelCase : Any=m ) -> Union[str, Any]:
A_ = 0
for i in range(_UpperCamelCase ):
if index == -1:
summation_value += _error(_UpperCamelCase )
else:
summation_value += _error(_UpperCamelCase ) * train_data[i][0][index]
return summation_value
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Union[str, Any]:
A_ = summation_of_cost_derivative(_UpperCamelCase, _UpperCamelCase ) / m
return cost_derivative_value
def _UpperCAmelCase ( ) -> Any:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A_ = 0.0_0_0_0_0_2
A_ = 0
A_ = 0
while True:
j += 1
A_ = [0, 0, 0, 0]
for i in range(0, len(_UpperCamelCase ) ):
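            # index -1 selects the bias derivative (the error term with no feature multiplier)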
A_ = get_cost_derivative(i - 1 )
A_ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_UpperCamelCase, _UpperCamelCase, atol=_UpperCamelCase, rtol=_UpperCamelCase, ):
break
A_ = temp_parameter_vector
print(('''Number of iterations:''', j) )
def _UpperCAmelCase ( ) -> str:
for i in range(len(_UpperCamelCase ) ):
print(('''Actual output value:''', output(_UpperCamelCase, '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(_UpperCamelCase, '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 18 | '''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Tuple, _UpperCamelCase : List[str] ) -> int:
A_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A_ = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A_ = F'''{src_lang}-{tgt_lang}'''
A_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
A_ = os.path.join(_UpperCamelCase, '''README.md''' )
print(F'''Generating {path}''' )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
__snake_case : Any = Path(__file__).resolve().parent.parent.parent
__snake_case : Tuple = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__snake_case , __snake_case , __snake_case : Any = model_name.split('-')
__snake_case : int = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
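        # Every (batch_size, sequence_length) cell of the benchmark result must be populated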
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
A_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''sshleifer/tiny-gpt2'''
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ) -> List[Any]:
A_ = '''sgugger/tiny-distilbert-classification'''
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ) -> int:
A_ = '''sshleifer/tiny-gpt2'''
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ) -> Dict:
A_ = '''sshleifer/tiny-gpt2'''
A_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ) -> List[str]:
A_ = '''sshleifer/tiny-gpt2'''
A_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ) -> Dict:
A_ = '''sshleifer/tiny-gpt2'''
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ) -> List[str]:
A_ = '''sshleifer/tiny-gpt2'''
A_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ) -> str:
A_ = '''patrickvonplaten/t5-tiny-random'''
A_ = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def __A ( self ) -> Optional[Any]:
A_ = '''sshleifer/tiny-gpt2'''
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
A_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ) -> List[Any]:
A_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , '''env.csv''' ) , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , '''env.csv''' ) ).exists() )
def __A ( self ) -> List[Any]:
A_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''sequential''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''cumulative''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''current''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , '''log.txt''' ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
A_ = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
A_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , '''log.txt''' ) ).exists() )
| 18 | '''simple docstring'''
from collections import defaultdict
def _UpperCAmelCase ( _UpperCamelCase : int ) -> int:
A_ = 1
A_ = True
for v in tree[start]:
if v not in visited:
ret += dfs(_UpperCamelCase )
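    # a subtree with an even number of nodes can be split off by cutting the edge to its parent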
if ret % 2 == 0:
cuts.append(_UpperCamelCase )
return ret
def _UpperCAmelCase ( ) -> Optional[Any]:
dfs(1 )
if __name__ == "__main__":
__snake_case , __snake_case : Union[str, Any] = 10, 9
__snake_case : int = defaultdict(list)
__snake_case : dict[int, bool] = {}
__snake_case : list[int] = []
__snake_case : Union[str, Any] = 0
__snake_case : int = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
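    # For this sample tree the script prints 2: cutting the edges above the
    # subtrees rooted at nodes 3 and 6 leaves every component even-sized.
    # The root's own entry in `cuts` is discarded by the final `- 1`.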
| 18 | 1 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = CodeGenTokenizer
__lowercase : List[Any] = CodeGenTokenizerFast
__lowercase : List[str] = True
__lowercase : str = {'add_prefix_space': True}
__lowercase : Optional[int] = False
def __A ( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
A_ = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
A_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
A_ = {'''unk_token''': '''<unk>'''}
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> Dict:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> str:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
A_ = '''lower newer'''
A_ = '''lower newer'''
return input_text, output_text
def __A ( self ) -> List[Any]:
A_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A_ = '''lower newer'''
A_ = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
A_ = tokenizer.tokenize(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = tokens + [tokenizer.unk_token]
A_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
# Testing tokenization
A_ = tokenizer.tokenize(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
A_ = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
A_ = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
A_ = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
A_ = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing the unknown token
A_ = tokens + [rust_tokenizer.unk_token]
A_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , _SCREAMING_SNAKE_CASE=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Simple input
A_ = '''This is a simple input'''
A_ = ['''This is a simple input 1''', '''This is a simple input 2''']
A_ = ('''This is a simple input''', '''This is a pair''')
A_ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' , )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' , )
def __A ( self ) -> Dict:
A_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
A_ = '''This is a simple input'''
A_ = ['''This is a simple input looooooooong''', '''This is a simple input''']
A_ = ('''This is a simple input''', '''This is a pair''')
A_ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
A_ = tokenizer.pad_token_id
A_ = tokenizer(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
A_ = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncate=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ = tokenizer(*_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
A_ = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncate=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __A ( self ) -> int:
A_ = '''$$$'''
A_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_SCREAMING_SNAKE_CASE , add_bos_token=_SCREAMING_SNAKE_CASE )
A_ = '''This is a simple input'''
A_ = ['''This is a simple input 1''', '''This is a simple input 2''']
A_ = tokenizer.bos_token_id
A_ = tokenizer(_SCREAMING_SNAKE_CASE )
A_ = tokenizer(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , _SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A_ = tokenizer.decode(out_s.input_ids )
A_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ) -> Optional[Any]:
A_ = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
A_ = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
A_ = '''\nif len_a > len_b: result = a\nelse: result = b'''
A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE )
A_ = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
A_ = tokenizer.decode(_SCREAMING_SNAKE_CASE , truncate_before_pattern=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
pass
| 18 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[int] = 'mgp-str'
def __init__( self , _SCREAMING_SNAKE_CASE=[32, 128] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=27 , _SCREAMING_SNAKE_CASE=38 , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = max_token_length
A_ = num_character_labels
A_ = num_bpe_labels
A_ = num_wordpiece_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = mlp_ratio
A_ = distilled
A_ = layer_norm_eps
A_ = drop_rate
A_ = qkv_bias
A_ = attn_drop_rate
A_ = drop_path_rate
A_ = output_aa_attentions
A_ = initializer_range
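# For illustration (a sketch): instantiating this configuration with no
# arguments reproduces the 'alibaba-damo/mgp-str-base' defaults declared
# above (32x128 inputs, patch size 4, hidden size 768, 12 layers/heads).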
| 18 | 1 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """Count composites below max_number with exactly two (not necessarily distinct) prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
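    # Quick cross-check on a small bound (a sketch; verified by hand): the ten
    # integers below 30 with exactly two prime factors are
    # 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.
    assert solution(30) == 10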
| 18 | '''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | 1 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize the interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when the current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result extends the right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # on the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if a value is at least the length of the pattern string,
        # that index is the starting position of a substring
        # equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
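    # Spot checks (a sketch; values computed from the definition above, with
    # z[0] conventionally left as 0):
    # z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    assert find_pattern("abr", "abracadabra") == 2  # matches at indices 0 and 7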
| 18 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
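    # Example invocations routed through this parser (for illustration):
    #   accelerate config            # interactive setup of the default config
    #   accelerate launch train.py   # launch a script with the saved config
    #   accelerate env               # report the current environment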
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__snake_case : int = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Dict = 'ernie_m'
__lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , _SCREAMING_SNAKE_CASE = 25_0002 , _SCREAMING_SNAKE_CASE = 768 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 3072 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 514 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1E-05 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.0 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = layer_norm_eps
A_ = classifier_dropout
A_ = is_decoder
A_ = act_dropout
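# For illustration (a sketch): the defaults above mirror the base-sized
# checkpoint; per-variant fields such as hidden_size or num_hidden_layers can
# simply be overridden when constructing the configuration.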
| 18 | '''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__snake_case : Any = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
__snake_case : Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__snake_case : Optional[int] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
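# Note on the flag handled above (a sketch): with use_aggregator=False the
# metric returns, per ROUGE type, one rouge_score Score tuple per example
# instead of bootstrap aggregates, e.g. results["rouge1"][0].fmeasure for the
# first prediction.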
| 18 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : Dict = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[Any] = 'open-llama'
def __init__( self , _SCREAMING_SNAKE_CASE=10_0000 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=1_1008 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-6 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Any:
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = intermediate_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = initializer_range
A_ = rms_norm_eps
A_ = use_cache
A_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _SCREAMING_SNAKE_CASE )
A_ = hidden_dropout_prob
A_ = attention_dropout_prob
A_ = use_stable_embedding
A_ = shared_input_output_embedding
A_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , tie_word_embeddings=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self ) -> str:
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}''' )
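# For illustration, a value that passes the validation above (a sketch,
# assuming the class is exported as OpenLlamaConfig, per model_type
# 'open-llama'):
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})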
| 18 | '''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    # gpt2 uses a Conv1D c_fc projection; bloom-style models expose the
    # second MLP projection (spelled "dense_ah_to_h" throughout this file)
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
super().__init__()
A_ = module
A_ = nn.Sequential(
nn.Linear(module.in_features , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) , nn.Linear(_SCREAMING_SNAKE_CASE , module.out_features , bias=_SCREAMING_SNAKE_CASE ) , )
A_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_SCREAMING_SNAKE_CASE )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __A ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.module(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) + self.adapter(_SCREAMING_SNAKE_CASE )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = 'bigscience/bloom-1b7'
# Constant values
__lowercase : str = 2.109659552692574
__lowercase : int = 'Hello my name is'
__lowercase : Optional[Any] = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__lowercase : Optional[Any] = 10
def __A ( self ) -> List[str]:
# Models and tokenizer
A_ = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[Any]:
super().setUp()
# Models and tokenizer
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> List[str]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
A_ = self.model_abit.config
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''quantization_config''' ) )
A_ = config.to_dict()
A_ = config.to_diff_dict()
A_ = config.to_json_string()
def __A ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
A_ = self.model_fpaa.get_memory_footprint()
A_ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ) -> Union[str, Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ) -> Optional[int]:
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
A_ = BitsAndBytesConfig()
A_ = True
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Tuple:
with self.assertRaises(_SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = BitsAndBytesConfig()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __A ( self ) -> Dict:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_fpaa.to(torch.floataa )
A_ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
A_ = self.model_fpaa.half()
# Check this does not throw an error
A_ = self.model_fpaa.float()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Optional[Any]:
A_ = '''t5-small'''
A_ = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
A_ = AutoTokenizer.from_pretrained(cls.model_name )
A_ = '''Translate in German: Hello, my dog is cute'''
def __A ( self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
from transformers import TaForConditionalGeneration
A_ = TaForConditionalGeneration._keep_in_fpaa_modules
A_ = None
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
A_ = modules
def __A ( self ) -> Dict:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> int:
super().setUp()
# model_name
A_ = '''bigscience/bloom-560m'''
A_ = '''t5-small'''
# Different types of model
A_ = AutoModel.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Sequence classification model
A_ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# CausalLM model
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Seq2seq model
A_ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> Union[str, Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> List[str]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
super().setUp()
def __A ( self ) -> List[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
A_ = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A_ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
super().setUp()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
A_ = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> str:
A_ = '''facebook/opt-350m'''
super().setUp()
def __A ( self ) -> Optional[int]:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A_ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A_ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_SCREAMING_SNAKE_CASE ) ):
A_ = LoRALayer(module.q_proj , rank=16 )
A_ = LoRALayer(module.k_proj , rank=16 )
A_ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A_ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A_ = model.forward(**_SCREAMING_SNAKE_CASE )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_SCREAMING_SNAKE_CASE , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'gpt2-xl'
__lowercase : List[Any] = 3.3191854854152187
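# The loading pattern all of these tests revolve around, in plain form
# (a sketch; note this file's identifiers spell 4-bit as "abit" and
# float16 as "floataa"):
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto")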
| 18 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = ReformerTokenizer
__lowercase : List[Any] = ReformerTokenizerFast
__lowercase : Union[str, Any] = True
__lowercase : Dict = False
__lowercase : List[str] = True
def __A ( self ) -> Optional[Any]:
super().setUp()
A_ = ReformerTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ) -> Optional[int]:
A_ = '''<s>'''
A_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1000 )
def __A ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __A ( self ) -> Any:
if not self.test_rust_tokenizer:
return
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = '''I was born in 92000, and this is falsé.'''
A_ = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
A_ = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE )
A_ = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE=15 ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Simple input
A_ = '''This is a simple input'''
A_ = ['''This is a simple input 1''', '''This is a simple input 2''']
A_ = ('''This is a simple input''', '''This is a pair''')
A_ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' , )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='''max_length''' , )
def __A ( self ) -> Dict:
pass
def __A ( self ) -> Union[str, Any]:
A_ = ReformerTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
A_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [285, 46, 10, 170, 382] , )
A_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
A_ = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A_ = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __A ( self ) -> Union[str, Any]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def __A ( self ) -> Any:
A_ = '''Hello World!'''
A_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def __A ( self ) -> Dict:
A_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
A_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@require_torch
@slow
def __A ( self ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
A_ = ''' '''.join(_SCREAMING_SNAKE_CASE )
A_ = self.big_tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
A_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
A_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A_ = encoded_sequence['''input_ids'''].shape
A_ = ReformerModel(_SCREAMING_SNAKE_CASE )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_SCREAMING_SNAKE_CASE )
model(**_SCREAMING_SNAKE_CASE )
@slow
def __A ( self ) -> Tuple:
# fmt: off
A_ = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A_ = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_SCREAMING_SNAKE_CASE , sequences=_SCREAMING_SNAKE_CASE , )
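    # The round trip the integration tests above rely on, in plain form (a sketch):
    #   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    #   tok.encode("Hello World!")  # -> [126, 32, 262, 152, 38, 72, 287], per the slow test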
| 18 | '''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ = path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
A_ = Parquet(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self ) -> str:
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Dict:
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self ) -> int:
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
A_ = self._write(file_obj=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
return written
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
A_ = 0
A_ = parquet_writer_kwargs.pop('''path_or_buf''' , _SCREAMING_SNAKE_CASE )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _SCREAMING_SNAKE_CASE ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
A_ = query_table(
table=self.dataset._data , key=slice(_SCREAMING_SNAKE_CASE , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_SCREAMING_SNAKE_CASE )
written += batch.nbytes
writer.close()
return written
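# A typical round trip built on the two classes above (a sketch, assuming they
# correspond to datasets' ParquetDatasetReader / ParquetDatasetWriter):
#   ds = ParquetDatasetReader("data.parquet", split="train").read()
#   ParquetDatasetWriter(ds, "copy.parquet").write()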
| 18 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('''Transformers CLI tool''', usage='''transformers-cli <command> [<args>]''')
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''')
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
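    # Example invocations handled by the subcommands registered above
    # (for illustration):
    #   transformers-cli env
    #   transformers-cli download bert-base-uncased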
| 18 | '''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale into [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: zero mean, unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
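# A minimal demo of the two rescalings above (a sketch; values verified by
# hand, with statistics.stdev's sample standard deviation of [2, 4, 6] = 2):
if __name__ == "__main__":
    print(normalization([2, 4, 6]))    # [0.0, 0.5, 1.0]
    print(standardization([2, 4, 6]))  # [-1.0, 0.0, 1.0]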
| 18 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(search_prob, find_max: bool = True, max_x: float = math.inf, min_x: float = -math.inf, max_y: float = math.inf, min_y: float = -math.inf, visualization: bool = False, start_temperate: float = 100, rate_of_decrease: float = 0.01, threshold_temp: float = 1) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
    )
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        F"""{local_max.score()}"""
    )
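    # Acceptance-probability intuition (a sketch): a move that worsens the
    # score by 1.0 survives with probability e**(-1 / 100) ~= 0.99 at the
    # starting temperature of 100, but only e**(-1) ~= 0.37 once the system
    # has cooled to 1, so the schedule gradually turns the random walk greedy.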
| 18 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__snake_case : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, '''rb''') as f:
        im = Image.open(f)
        return im.convert('''RGB''')
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ ,A_ ,A_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ ,A_ ,A_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''', _UpperCamelCase, _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
A_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
A_ = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
else:
A_ = {}
if data_args.train_dir is not None:
A_ = os.path.join(data_args.train_dir, '''**''' )
if data_args.validation_dir is not None:
A_ = os.path.join(data_args.validation_dir, '''**''' )
A_ = load_dataset(
'''imagefolder''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir, task='''image-classification''', )
# If we don't have a validation split, split off a percentage of train as validation.
A_ = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _UpperCamelCase ) and data_args.train_val_split > 0.0:
A_ = dataset['''train'''].train_test_split(data_args.train_val_split )
A_ = split['''train''']
A_ = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A_ = dataset['''train'''].features['''labels'''].names
A_ ,A_ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
A_ = str(_UpperCamelCase )
A_ = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
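    # Hypothetical shapes: p.predictions is (num_examples, num_labels) of logits,
    # so the argmax over axis=1 yields one predicted class id per example, which
    # the accuracy metric compares against the integer ids in p.label_ids.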
A_ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(_UpperCamelCase ), labelaid=_UpperCamelCase, idalabel=_UpperCamelCase, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A_ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
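    # Hypothetical example: a processor configured with size={"shortest_edge": 256}
    # yields size == 256, while size={"height": 224, "width": 224} yields
    # size == (224, 224); the torchvision transforms below accept either form.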
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )
    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch):
        example_batch["pixel_values"] = [
            _val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
A_ = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
A_ = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCamelCase )
    # Initialize our trainer
A_ = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=_UpperCamelCase, tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, )
# Training
if training_args.do_train:
A_ = None
if training_args.resume_from_checkpoint is not None:
A_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ = last_checkpoint
A_ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ = trainer.evaluate()
trainer.log_metrics('''eval''', _UpperCamelCase )
trainer.save_metrics('''eval''', _UpperCamelCase )
# Write model card and (optionally) push to hub
A_ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __A ( self ) -> Optional[Any]:
torch.manual_seed(0 )
A_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __A ( self ) -> str:
torch.manual_seed(0 )
A_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __A ( self ) -> Dict:
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
A_ = self.dummy_uncond_unet
A_ = DDIMScheduler()
A_ = self.dummy_vq_model
A_ = LDMPipeline(unet=_SCREAMING_SNAKE_CASE , vqvae=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ldm.to(_SCREAMING_SNAKE_CASE )
ldm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = torch.manual_seed(0 )
A_ = ldm(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' ).images
A_ = torch.manual_seed(0 )
A_ = ldm(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' , return_dict=_SCREAMING_SNAKE_CASE )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
A_ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Optional[Any]:
A_ = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(_SCREAMING_SNAKE_CASE )
ldm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ = torch.manual_seed(0 )
A_ = ldm(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , output_type='''numpy''' ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A_ = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
A_ = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 18 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Dict:
A_ = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __A ( self ) -> str:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def __A ( self ) -> List[str]:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal(modela, modelb) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params)
    flat_params_b = flatten_dict(modelb.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> List[Any]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
__snake_case : Dict = False
__snake_case : Optional[int] = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
@staticmethod
def __A ( _SCREAMING_SNAKE_CASE ) -> Dict:
A_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_SCREAMING_SNAKE_CASE , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=_SCREAMING_SNAKE_CASE , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=_SCREAMING_SNAKE_CASE , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=_SCREAMING_SNAKE_CASE , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=_SCREAMING_SNAKE_CASE , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_SCREAMING_SNAKE_CASE , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=_SCREAMING_SNAKE_CASE , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=_SCREAMING_SNAKE_CASE , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=_SCREAMING_SNAKE_CASE , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=_SCREAMING_SNAKE_CASE , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=3E-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=_SCREAMING_SNAKE_CASE , default=1E-08 , help='''Epsilon for Adam optimizer.''' )
        train_parser.set_defaults(func=train_command_factory)
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
A_ = logging.get_logger('''transformers-cli/training''' )
A_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_SCREAMING_SNAKE_CASE )
A_ = args.output
A_ = args.column_label
A_ = args.column_text
A_ = args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
A_ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
A_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A_ = None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
A_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A_ = args.validation_split
A_ = args.train_batch_size
A_ = args.valid_batch_size
A_ = args.learning_rate
A_ = args.adam_epsilon
def __A ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __A ( self ) -> Union[str, Any]:
raise NotImplementedError
def __A ( self ) -> Union[str, Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 18 | '''simple docstring'''
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
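# Worked example: 28 = 2**2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6,
# matching its divisors {1, 2, 4, 7, 14, 28}.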
def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 18 | 1 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
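# Note: the loop above swaps two independently chosen positions rather than
# performing the textbook shuffle. A minimal sketch of the classic
# Fisher-Yates (Knuth) variant, added here purely for comparison:
def fisher_yates_shuffle_classic(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        # pick a partner index at or below i, then swap
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data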
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 18 | '''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = backbone_out_indices
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = backbone_featmap_shape
A_ = scope
A_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
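        # Hypothetical numbers: with image_size=32 and patch_size=16 there are
        # (32 // 16) ** 2 = 4 patches, giving a sequence length of 4 + 1 = 5.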
def __A ( self ) -> Optional[Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
A_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = self.num_labels
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = self.num_labels
A_ = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ) -> Optional[int]:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Tuple = False
__lowercase : List[Any] = False
def __A ( self ) -> Tuple:
A_ = DPTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
A_ = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_SCREAMING_SNAKE_CASE )
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE )
A_ = outputs.predicted_depth
# verify the predicted depth
A_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__snake_case : Optional[Any] = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
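    # Storing ids as 16-bit unsigned ints when the vocabulary has fewer than
    # 2**16 entries halves the size of the pickled dump; larger vocabularies
    # fall back to 32-bit signed ints.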
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 18 | '''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # reject negative values of the initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # reject angles outside the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
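# Worked example: at angle 0 the beam passes through unchanged, since
# cos(0)**2 == 1, while at 60 degrees only a quarter is transmitted, since
# cos(60 degrees)**2 == 0.25; malus_law(100, 60) is approximately 25.0.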
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 18 | 1 |
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 18 | '''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 18 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : Any = get_tests_dir('fixtures/test_sentencepiece.model')
__snake_case : Union[str, Any] = {'target_lang': 'fi', 'source_lang': 'en'}
__snake_case : List[str] = '>>zh<<'
__snake_case : Any = 'Helsinki-NLP/'
if is_torch_available():
__snake_case : List[str] = 'pt'
elif is_tf_available():
__snake_case : Optional[int] = 'tf'
else:
__snake_case : List[str] = 'jax'
@require_sentencepiece
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = MarianTokenizer
__lowercase : Union[str, Any] = False
__lowercase : Dict = True
def __A ( self ) -> Union[str, Any]:
super().setUp()
A_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
A_ = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
A_ = Path(self.tmpdirname )
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
A_ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return (
"This is a test",
"This is a test",
)
def __A ( self ) -> List[str]:
A_ = '''</s>'''
A_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 9 )
def __A ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __A ( self ) -> Tuple:
A_ = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
A_ = en_de_tokenizer(['''I am a small frog'''] , return_tensors=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , batch.input_ids[0] )
A_ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = [x.name for x in Path(_SCREAMING_SNAKE_CASE ).glob('''*''' )]
self.assertIn('''source.spm''' , _SCREAMING_SNAKE_CASE )
MarianTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Dict:
A_ = self.get_tokenizer()
A_ = tok(
['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def __A ( self ) -> List[Any]:
A_ = self.get_tokenizer()
A_ = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def __A ( self ) -> Union[str, Any]:
# fmt: off
A_ = {'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def __A ( self ) -> Optional[Any]:
A_ = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
A_ = '''Tämä on testi'''
A_ = '''This is a test'''
A_ = [76, 7, 2047, 2]
A_ = [69, 12, 11, 940, 2]
A_ = tokenizer(_SCREAMING_SNAKE_CASE ).input_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = tokenizer(text_target=_SCREAMING_SNAKE_CASE ).input_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 18 | '''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split the fused QKV projection into separate Q, K, V weights
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
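# A minimal sketch (hypothetical sizes) of the fused-QKV split performed above:
#
#     fused = torch.randn(3 * 8, 8)  # a (3 * hidden, hidden) qkv projection
#     q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
#     assert q.shape == k.shape == v.shape == (8, 8)
#
# Each contiguous third along dim 0 becomes a standalone q/k/v weight.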
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
__snake_case : Optional[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
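# Worked example: 585 reads the same forwards and backwards in base 10 and in
# base 2 (bin(585) == "0b1001001001"), so it is counted towards the total.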
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 18 | '''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Optional[Any] = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
__snake_case : Tuple = {'allegro/herbert-base-cased': 514}
__snake_case : List[str] = {}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Any = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = HerbertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="</s>" , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
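    # Hypothetical token layout produced by the method above:
    #   single sequence: <s> tokens_a </s>
    #   sequence pair:   <s> tokens_a </s> tokens_b </s>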
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
A_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : List[Any] = 'decision_transformer'
__lowercase : List[Any] = ['past_key_values']
__lowercase : List[Any] = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=17 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=5_0256 , _SCREAMING_SNAKE_CASE=5_0256 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
A_ = state_dim
A_ = act_dim
A_ = hidden_size
A_ = max_ep_len
A_ = action_tanh
A_ = vocab_size
A_ = n_positions
A_ = n_layer
A_ = n_head
A_ = n_inner
A_ = activation_function
A_ = resid_pdrop
A_ = embd_pdrop
A_ = attn_pdrop
A_ = layer_norm_epsilon
A_ = initializer_range
A_ = scale_attn_weights
A_ = use_cache
A_ = scale_attn_by_inverse_layer_idx
A_ = reorder_and_upcast_attn
A_ = bos_token_id
A_ = eos_token_id
super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 18 | '''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
A_ = subparsers.add_parser('''env''' )
else:
A_ = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''', default=_UpperCamelCase, help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=_UpperCamelCase )
return parser
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Dict:
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = is_xpu_available()
A_ = is_npu_available()
A_ = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCamelCase ):
A_ = load_config_from_file(args.config_file ).to_dict()
A_ = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(_UpperCamelCase ),
'''PyTorch NPU available''': str(_UpperCamelCase ),
'''System RAM''': F'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
A_ = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
A_ = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_UpperCamelCase, _UpperCamelCase )
else F'''\t{accelerate_config}'''
)
print(_UpperCamelCase )
A_ = accelerate_config
return info
def _UpperCAmelCase ( ) -> int:
A_ = env_command_parser()
A_ = parser.parse_args()
env_command(_UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
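`env_command_parser` follows the common CLI convention of accepting an optional subparsers collection, so one factory serves both standalone and subcommand use, with the handler wired through `set_defaults(func=...)`. A generic sketch of that pattern (names and behavior here are illustrative, not accelerate's actual command):
```python
# Generic sketch of the "optional subparsers" parser-factory pattern used above.
import argparse

def env_parser(subparsers=None):
    parser = subparsers.add_parser("env") if subparsers is not None else argparse.ArgumentParser("env")
    parser.add_argument("--config_file", default=None)
    if subparsers is not None:
        # Record which handler runs when this subcommand is selected.
        parser.set_defaults(func=lambda args: print("env:", args.config_file))
    return parser

root = argparse.ArgumentParser("tool")
env_parser(root.add_subparsers())
args = root.parse_args(["env", "--config_file", "cfg.yaml"])
args.func(args)  # prints: env: cfg.yaml
```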
| 18 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Dict:
A_ = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __A ( self ) -> str:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def __A ( self ) -> List[str]:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple ) -> Dict:
A_ = True
A_ = flatten_dict(modela.params )
A_ = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
A_ = False
return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> List[Any]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | '''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = mask_ratio
A_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
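        # Worked numbers with the tester defaults above: image_size=30 and
        # patch_size=2 give num_patches = (30 // 2) ** 2 = 225, and with
        # mask_ratio=0.6 the visible sequence length is
        # int(math.ceil(0.4 * (225 + 1))) = ceil(90.4) = 91.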
def __A ( self ) -> Union[str, Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = (self.image_size // self.patch_size) ** 2
A_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ = 1
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self ) -> int:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowercase : List[Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__lowercase : Union[str, Any] = False
__lowercase : List[Any] = False
__lowercase : List[str] = False
__lowercase : List[str] = False
def __A ( self ) -> Any:
A_ = ViTMAEModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __A ( self ) -> int:
pass
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Union[str, Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
# make masks reproducible
np.random.seed(2 )
A_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
A_ = outputs[0].cpu().numpy()
A_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
A_ = after_outputs[0].cpu().numpy()
A_ = 0
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Dict:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __A ( self ) -> str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Union[str, Any]:
pass
@slow
def __A ( self ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Dict:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_SCREAMING_SNAKE_CASE )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ = ViTMAEConfig()
A_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
A_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __UpperCAmelCase ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> str:
A_ = load_tool('''text-to-speech''' )
self.tool.setup()
def __A ( self ) -> Any:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A_ = self.tool('''hey''' )
A_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def __A ( self ) -> List[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A_ = self.tool('''hey''' )
A_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 18 | '''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[Any] = 'xlm-prophetnet'
__lowercase : Optional[int] = ['past_key_values']
__lowercase : int = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 3_0522 , _SCREAMING_SNAKE_CASE = 1024 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 2 , **_SCREAMING_SNAKE_CASE , ) -> int:
A_ = vocab_size
A_ = hidden_size
A_ = encoder_ffn_dim
A_ = num_encoder_layers
A_ = num_encoder_attention_heads
A_ = decoder_ffn_dim
A_ = num_decoder_layers
A_ = num_decoder_attention_heads
A_ = max_position_embeddings
A_ = init_std # Normal(0, this parameter)
A_ = activation_function
# parameters for xlmprophetnet
A_ = ngram
A_ = num_buckets
A_ = relative_max_distance
A_ = disable_ngram_loss
A_ = eps
# 3 Types of Dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = dropout
A_ = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , add_cross_attention=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@property
def __A ( self ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
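The property above makes `num_hidden_layers` a derived, read-only quantity; its setter exists only to fail loudly with an actionable message. A minimal standalone sketch of the same guard pattern (class name and fields are illustrative):
```python
# Sketch: read-only derived property whose setter fails loudly.
class SplitConfig:
    def __init__(self, num_encoder_layers: int, num_decoder_layers: int):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("Set num_encoder_layers / num_decoder_layers instead.")

cfg = SplitConfig(12, 12)
assert cfg.num_hidden_layers == 24
try:
    cfg.num_hidden_layers = 6
except NotImplementedError as err:
    print(err)
```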
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
__snake_case : Union[str, Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__snake_case : Optional[int] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _UpperCAmelCase ( _UpperCamelCase : list[float] ) -> list[float]:
A_ = []
A_ = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
A_ = -1
for j in range(i + 1, _UpperCamelCase ):
if arr[i] < arr[j]:
A_ = arr[j]
break
result.append(_UpperCamelCase )
return result
def _UpperCAmelCase ( _UpperCamelCase : list[float] ) -> list[float]:
A_ = []
for i, outer in enumerate(_UpperCamelCase ):
A_ = -1
for inner in arr[i + 1 :]:
if outer < inner:
A_ = inner
break
result.append(_UpperCamelCase )
return result
def _UpperCAmelCase ( _UpperCamelCase : list[float] ) -> list[float]:
A_ = len(_UpperCamelCase )
A_ = []
A_ = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
A_ = stack[-1]
stack.append(arr[index] )
return result
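# Worked trace of the stack version above on [2, 7, 3, 5, 4, 6, 8], scanning
# right-to-left; the stack keeps the nearest greater candidate on top, so each
# element's answer is whatever survives the pops:
#   index 6 (8): stack empty      -> -1, push 8
#   index 5 (6): top 8 > 6        ->  8, push 6
#   index 4 (4): top 6 > 4        ->  6, push 4
#   index 3 (5): pop 4; top 6 > 5 ->  6, push 5
#   index 2 (3): top 5 > 3        ->  5, push 3
#   index 1 (7): pop 3, 5, 6      ->  8, push 7
#   index 0 (2): top 7 > 2        ->  7, push 2
# result: [7, 8, 5, 6, 6, 8, -1] (the two loop-based variants agree, in O(n^2)).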
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__snake_case : Optional[Any] = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 18 | '''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : list[float] ) -> float:
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
A_ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_UpperCamelCase ) )
return round(_UpperCamelCase, ndigits=2 )
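# Worked example of the discounting above, NPV = sum(CF_i / (1 + r) ** i),
# with rate = 0.10 and cash_flows = [-1000, 500, 600]:
#   -1000 / 1.1**0 = -1000.0000
#     500 / 1.1**1 =   454.5455
#     600 / 1.1**2 =   495.8678
# total ~ -49.5868, returned as -49.59 after round(..., ndigits=2).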
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : Dict = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 18 | '''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _UpperCamelCase : int | str ) -> bool:
A_ = str(_UpperCamelCase )
return n == n[::-1]
def _UpperCAmelCase ( _UpperCamelCase : int = 1_00_00_00 ) -> Any:
A_ = 0
for i in range(1, _UpperCamelCase ):
if is_palindrome(_UpperCamelCase ) and is_palindrome(bin(_UpperCamelCase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
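The filter above keeps numbers that are palindromic both in base 10 and in base 2 (Project Euler 36); 585 is the classic example. Note that `bin(n)[2:]` is equivalent to the `split('b')[1]` slice used above:
```python
# 585 reads the same both ways in base 10 and in base 2.
n = 585
assert str(n) == str(n)[::-1]          # "585"
assert bin(n)[2:] == bin(n)[2:][::-1]  # "1001001001"
```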
| 18 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = ['image_processor', 'tokenizer']
__lowercase : int = 'CLIPImageProcessor'
__lowercase : Union[str, Any] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _SCREAMING_SNAKE_CASE , )
A_ = kwargs.pop('''feature_extractor''' )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
A_ = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if images is not None:
A_ = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
def __A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def __A ( self ) -> Optional[Any]:
A_ = self.tokenizer.model_input_names
A_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
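The processor above pairs a CLIP image processor with an XLM-R tokenizer while keeping the usual combined-processor calling convention. The same text-plus-images call is shown below with the stock `CLIPProcessor` and the COCO fixture path referenced in the test code above (processor files download on first use):
```python
# Same text+images calling convention, demonstrated with the stock CLIPProcessor.
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']
```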
| 18 | '''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Tuple, _UpperCamelCase : List[str] ) -> int:
A_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A_ = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A_ = F'''{src_lang}-{tgt_lang}'''
A_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
A_ = os.path.join(_UpperCamelCase, '''README.md''' )
print(F'''Generating {path}''' )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
__snake_case : Any = Path(__file__).resolve().parent.parent.parent
__snake_case : Tuple = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__snake_case , __snake_case , __snake_case : Any = model_name.split('-')
__snake_case : int = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ) -> Union[str, Any]:
A_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
A_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
A_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
A_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
A_ = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id )
A_ = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ).logits
A_ = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1] ) ).mean()
A_ = -(labels.shape[-1] * loss.item())
A_ = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 18 | '''simple docstring'''
from collections import defaultdict
def _UpperCAmelCase ( _UpperCamelCase : int ) -> int:
A_ = 1
A_ = True
for v in tree[start]:
if v not in visited:
ret += dfs(_UpperCamelCase )
if ret % 2 == 0:
cuts.append(_UpperCamelCase )
return ret
def _UpperCAmelCase ( ) -> Optional[Any]:
dfs(1 )
if __name__ == "__main__":
__snake_case , __snake_case : Union[str, Any] = 10, 9
__snake_case : int = defaultdict(list)
__snake_case : dict[int, bool] = {}
__snake_case : list[int] = []
__snake_case : Union[str, Any] = 0
__snake_case : int = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 18 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : str = logging.get_logger(__name__)
__snake_case : str = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'realm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=1E-3 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=320 , _SCREAMING_SNAKE_CASE=1335_3718 , _SCREAMING_SNAKE_CASE=5000 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 18 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[int] = 'mgp-str'
def __init__( self , _SCREAMING_SNAKE_CASE=[32, 128] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=27 , _SCREAMING_SNAKE_CASE=38 , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = max_token_length
A_ = num_character_labels
A_ = num_bpe_labels
A_ = num_wordpiece_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = mlp_ratio
A_ = distilled
A_ = layer_norm_eps
A_ = drop_rate
A_ = qkv_bias
A_ = attn_drop_rate
A_ = drop_path_rate
A_ = output_aa_attentions
A_ = initializer_range
| 18 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Any = 'linear'
__lowercase : int = 'cosine'
__lowercase : str = 'cosine_with_restarts'
__lowercase : int = 'polynomial'
__lowercase : str = 'constant'
__lowercase : Optional[Any] = 'constant_with_warmup'
__lowercase : Tuple = 'piecewise_constant'
def _UpperCAmelCase ( _UpperCamelCase : Optimizer, _UpperCamelCase : int = -1 ) -> Any:
return LambdaLR(_UpperCamelCase, lambda _UpperCamelCase : 1, last_epoch=_UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Optimizer, _UpperCamelCase : int, _UpperCamelCase : int = -1 ) -> List[str]:
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0, _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase, _UpperCamelCase, last_epoch=_UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Optimizer, _UpperCamelCase : str, _UpperCamelCase : int = -1 ) -> Optional[int]:
A_ = {}
A_ = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
A_ ,A_ = rule_str.split(''':''' )
A_ = int(_UpperCamelCase )
A_ = float(_UpperCamelCase )
A_ = value
A_ = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase : Union[str, Any], _UpperCamelCase : Any ):
def rule_func(_UpperCamelCase : int ) -> float:
A_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A_ = create_rules_function(_UpperCamelCase, _UpperCamelCase )
return LambdaLR(_UpperCamelCase, _UpperCamelCase, last_epoch=_UpperCamelCase )
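# Worked example of the step_rules format parsed above: "10:1.0,20:0.5,0.1"
# yields rules_dict = {10: 1.0, 20: 0.5} plus a trailing default of 0.1, so the
# returned rule gives LR multiplier 1.0 for steps < 10, 0.5 for 10 <= step < 20,
# and 0.1 from step 20 onwards.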
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : Optional[Any], _UpperCamelCase : Any, _UpperCamelCase : Optional[int]=-1 ) -> int:
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1, _UpperCamelCase ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Optimizer, _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : float = 0.5, _UpperCamelCase : int = -1 ) -> str:
def lr_lambda(_UpperCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1, _UpperCamelCase ) )
A_ = float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Optimizer, _UpperCamelCase : int, _UpperCamelCase : int, _UpperCamelCase : int = 1, _UpperCamelCase : int = -1 ) -> Any:
def lr_lambda(_UpperCamelCase : List[Any] ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1, _UpperCamelCase ) )
A_ = float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : Optional[int], _UpperCamelCase : List[Any], _UpperCamelCase : Dict=1E-7, _UpperCamelCase : Dict=1.0, _UpperCamelCase : Tuple=-1 ) -> Optional[Any]:
A_ = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1, _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A_ = lr_init - lr_end
A_ = num_training_steps - num_warmup_steps
A_ = 1 - (current_step - num_warmup_steps) / decay_steps
A_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
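# Closed form implemented above, expressed as a factor of lr_init (LambdaLR
# multiplies lr_init by the returned value, hence the final `/ lr_init`):
#   lr(t) = (lr_init - lr_end) * (1 - (t - warmup) / (T - warmup)) ** power + lr_end
# e.g. lr_init=0.1, lr_end=0.01, power=1, T=10, no warmup: lr is 0.1 at step 0,
# 0.055 at step 5 (0.09 * 0.5 + 0.01), and stays at lr_end=0.01 from step 10 on.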
__snake_case : List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def _UpperCAmelCase ( _UpperCamelCase : Union[str, SchedulerType], _UpperCamelCase : Optimizer, _UpperCamelCase : Optional[str] = None, _UpperCamelCase : Optional[int] = None, _UpperCamelCase : Optional[int] = None, _UpperCamelCase : int = 1, _UpperCamelCase : float = 1.0, _UpperCamelCase : int = -1, ) -> List[str]:
A_ = SchedulerType(_UpperCamelCase )
A_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase, last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase, step_rules=_UpperCamelCase, last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase, num_warmup_steps=_UpperCamelCase, last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase, num_warmup_steps=_UpperCamelCase, num_training_steps=_UpperCamelCase, num_cycles=_UpperCamelCase, last_epoch=_UpperCamelCase, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase, num_warmup_steps=_UpperCamelCase, num_training_steps=_UpperCamelCase, power=_UpperCamelCase, last_epoch=_UpperCamelCase, )
return schedule_func(
_UpperCamelCase, num_warmup_steps=_UpperCamelCase, num_training_steps=_UpperCamelCase, last_epoch=_UpperCamelCase )
| 18 | '''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__snake_case : int = TypeVar('T')
def _UpperCAmelCase ( _UpperCamelCase : int ) -> int:
return (position - 1) // 2
def _UpperCAmelCase ( _UpperCamelCase : int ) -> int:
return (2 * position) + 1
def _UpperCAmelCase ( _UpperCamelCase : int ) -> int:
return (2 * position) + 2
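# Layout assumed by the three index helpers above (0-indexed array heap):
#             0
#           /   \
#          1     2
#         / \   / \
#        3   4 5   6
# e.g. left(1) = 3, right(1) = 4, and parent(4) = (4 - 1) // 2 = 1.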
class __UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ) -> None:
A_ = []
A_ = {}
A_ = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def __A ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
A_ = self.elements
self.elements += 1
self._bubble_up(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
A_ ,A_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
A_ ,A_ = self.heap[0]
self._bubble_down(_SCREAMING_SNAKE_CASE )
return elem
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
# Update the weight of the given key
A_ = self.position_map[elem]
A_ = (elem, weight)
if position > 0:
A_ = get_parent_position(_SCREAMING_SNAKE_CASE )
A_ ,A_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_SCREAMING_SNAKE_CASE )
else:
self._bubble_down(_SCREAMING_SNAKE_CASE )
else:
self._bubble_down(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
A_ = self.position_map[elem]
if curr_pos == 0:
return None
A_ = get_parent_position(_SCREAMING_SNAKE_CASE )
A_ ,A_ = self.heap[curr_pos]
A_ ,A_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_up(_SCREAMING_SNAKE_CASE )
return None
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
A_ = self.position_map[elem]
A_ ,A_ = self.heap[curr_pos]
A_ = get_child_left_position(_SCREAMING_SNAKE_CASE )
A_ = get_child_right_position(_SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
A_ ,A_ = self.heap[child_left_position]
A_ ,A_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
A_ ,A_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
A_ ,A_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
return None
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
# Swap the nodes at the given positions
A_ = self.heap[nodea_pos][0]
A_ = self.heap[nodea_pos][0]
A_ ,A_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
A_ = nodea_pos
A_ = nodea_pos
class __UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ) -> None:
A_ = {}
A_ = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
A_ = {}
self.nodes += 1
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(_SCREAMING_SNAKE_CASE )
self.add_node(_SCREAMING_SNAKE_CASE )
A_ = weight
A_ = weight
def _UpperCAmelCase ( _UpperCamelCase : GraphUndirectedWeighted[T], ) -> tuple[dict[T, int], dict[T, T | None]]:
A_ = {node: maxsize for node in graph.connections}
A_ = {node: None for node in graph.connections}
A_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_UpperCamelCase, _UpperCamelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
A_ = priority_queue.extract_min()
A_ = 0
for neighbour in graph.connections[node]:
        if dist[neighbour] > graph.connections[node][neighbour]:
            A_ = graph.connections[node][neighbour]  # Prim keys on the edge weight alone
priority_queue.update_key(_UpperCamelCase, dist[neighbour] )
A_ = node
# running prim's algorithm
while not priority_queue.is_empty():
A_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
            if dist[neighbour] > graph.connections[node][neighbour]:
                A_ = graph.connections[node][neighbour]  # edge weight, not accumulated distance
priority_queue.update_key(_UpperCamelCase, dist[neighbour] )
A_ = node
return dist, parent
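The routine above returns a `(dist, parent)` pair describing the spanning tree. A compact standalone restatement on a three-node triangle (readable names are mine; a linear scan stands in for the priority queue) shows the expected shape of that output:
```python
# Standalone sketch of Prim's algorithm on a triangle graph.
from sys import maxsize

connections = {
    "a": {"b": 3, "c": 6},
    "b": {"a": 3, "c": 2},
    "c": {"a": 6, "b": 2},
}

dist = {node: maxsize for node in connections}
parent = {node: None for node in connections}
visited = set()
dist["a"] = 0  # arbitrary root

while len(visited) < len(connections):
    # Pull the cheapest unvisited node into the tree.
    node = min((n for n in connections if n not in visited), key=lambda n: dist[n])
    visited.add(node)
    for neighbour, weight in connections[node].items():
        if neighbour not in visited and weight < dist[neighbour]:
            dist[neighbour] = weight  # relax on the single edge weight
            parent[neighbour] = node

assert parent == {"a": None, "b": "a", "c": "b"}  # MST edges a-b (3) and b-c (2)
```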
| 18 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _UpperCAmelCase ( ) -> Dict:
A_ = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=_UpperCamelCase )
A_ = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=_UpperCamelCase )
env_command_parser(subparsers=_UpperCamelCase )
launch_command_parser(subparsers=_UpperCamelCase )
tpu_command_parser(subparsers=_UpperCamelCase )
test_command_parser(subparsers=_UpperCamelCase )
# Let's go
A_ = parser.parse_args()
if not hasattr(_UpperCamelCase, '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = backbone_out_indices
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = backbone_featmap_shape
A_ = scope
A_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def __A ( self ) -> Optional[Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
A_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = self.num_labels
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = self.num_labels
A_ = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ) -> Optional[int]:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __A ( self ) -> Tuple:
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def __A ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
A_ = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_SCREAMING_SNAKE_CASE )
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE )
A_ = outputs.predicted_depth
# verify the predicted depth
A_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
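# Hedged post-processing sketch (an illustrative addition, not part of the test
# above; assumes torch is importable here, as the surrounding tests do): DPT
# depth maps are usually interpolated back to the source image size for
# visualization. The tensor below is a stand-in, not a real model output.
def _resize_depth_sketch():
    fake_depth = torch.rand(1, 384, 384)  # same shape the slow test asserts above
    resized = torch.nn.functional.interpolate(
        fake_depth.unsqueeze(1), size=(480, 640), mode='''bicubic''', align_corners=False )
    return resized.squeeze(1)  # -> (1, 480, 640)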
| 18 | '''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        if rouge_types is None:
            rouge_types = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
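# Hedged usage sketch (illustrative, not part of the metric class above): the
# underlying google-research ``rouge_score`` package can also be called directly.
if __name__ == "__main__":
    demo_scorer = rouge_scorer.RougeScorer(rouge_types=['''rouge1''', '''rougeL'''] , use_stemmer=True )
    # RougeScorer.score takes (target, prediction) and returns one Score tuple per type.
    demo_scores = demo_scorer.score('''the cat sat on the mat''' , '''the cat sat there''' )
    print(demo_scores['''rouge1'''].fmeasure )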
| 18 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_00_00_00 , n_limit: int = 10 ) -> int:
    """Count the tile totals t <= t_limit that can form at least one and at
    most n_limit distinct hollow square laminae."""
    count = defaultdict(int )
    for outer_width in range(3, (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ), 1 )
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
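# Hedged cross-check sketch (illustrative): enumerate the laminae directly for
# tiny limits to sanity-check ``solution``; far too slow for the real limits.
def brute_force_solution(t_limit: int = 1_00 , n_limit: int = 10 ) -> int:
    counts = defaultdict(int )
    for outer in range(3, t_limit + 1 ):
        # widest hole first, so the tile count grows as the hole shrinks
        for hole in range(outer - 2, 0, -2 ):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= n_limit )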
if __name__ == "__main__":
print(F"""{solution() = }""")
| 18 | '''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model ):
    # pick a representative MLP linear layer, depending on the architecture
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module ):
        '''Wraps a linear module with a small trainable low-rank adapter (testing only).'''
        def __init__( self , module: nn.Module , rank: int ):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
            small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=small_std )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )
        def forward( self , input , *args , **kwargs ):
            return self.module(input , *args , **kwargs ) + self.adapter(input )
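    # Hedged usage sketch for the adapter above (illustrative names only; assumes
    # torch is available, as in the guarded imports above): wrap a plain linear
    # layer and note that the output shape is unchanged.
    def _lora_wrap_sketch():
        base = nn.Linear(8 , 8 )
        wrapped = LoRALayer(base , 4 )  # rank-4 adapter around the wrapped layer
        return wrapped(torch.ones(2 , 8 ) ).shape  # torch.Size([2, 8])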
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    model_name = 'bigscience/bloom-1b7'
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    MAX_NEW_TOKENS = 10
def __A ( self ) -> List[str]:
# Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[Any]:
super().setUp()
# Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> List[str]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
A_ = self.model_abit.config
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''quantization_config''' ) )
A_ = config.to_dict()
A_ = config.to_diff_dict()
A_ = config.to_json_string()
def __A ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
A_ = self.model_fpaa.get_memory_footprint()
A_ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ) -> Union[str, Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ) -> Optional[int]:
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
A_ = BitsAndBytesConfig()
A_ = True
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Tuple:
with self.assertRaises(_SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = BitsAndBytesConfig()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __A ( self ) -> Dict:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_fpaa.to(torch.floataa )
A_ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
A_ = self.model_fpaa.half()
# Check this does not throw an error
A_ = self.model_fpaa.float()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Optional[Any]:
        cls.model_name = '''t5-small'''
        cls.dense_act_model_name = '''google/flan-t5-small'''  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = '''Translate in German: Hello, my dog is cute'''
def __A ( self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
from transformers import TaForConditionalGeneration
A_ = TaForConditionalGeneration._keep_in_fpaa_modules
A_ = None
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
A_ = modules
def __A ( self ) -> Dict:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> int:
super().setUp()
# model_name
        self.model_name = '''bigscience/bloom-560m'''
        self.seq_to_seq_name = '''t5-small'''
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> Union[str, Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> List[str]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
super().setUp()
def __A ( self ) -> List[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
        self.pipe = pipeline(
            '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
super().setUp()
def __A ( self ) -> Optional[int]:
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''balanced''' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors='''pt''' )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> str:
        self.model_name = '''facebook/opt-350m'''
super().setUp()
def __A ( self ) -> Optional[int]:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A_ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A_ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_SCREAMING_SNAKE_CASE ) ):
A_ = LoRALayer(module.q_proj , rank=16 )
A_ = LoRALayer(module.k_proj , rank=16 )
A_ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A_ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A_ = model.forward(**_SCREAMING_SNAKE_CASE )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_SCREAMING_SNAKE_CASE , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 18 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.02 , ) -> str:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = rotary_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = None
A_ = vocab_size - 1
A_ = vocab_size - 1
A_ = vocab_size - 1
def __A ( self ) -> Any:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __A ( self ) -> Any:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
A_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
A_ = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __A ( self ) -> Optional[Any]:
        self.model_tester = FlaxGPTJModelTester(self )
def __A ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __A ( self ) -> str:
A_ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
A_ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
A_ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = False
A_ = model.config.eos_token_id
A_ = jax.jit(model.generate )
A_ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
A_ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __A ( self ) -> Union[str, Any]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
A_ = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
A_ = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
A_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
A_ = fx_state
with torch.no_grad():
A_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
A_ = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
A_ = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
A_ = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
A_ = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __A ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | '''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features ) -> Optional[int]:
    """Return a batch size that keeps parquet row groups small for binary and
    media features, or None to keep the writer's default."""
    batch_size = np.inf
    def set_batch_size(feature: FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature, Image ):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature, Audio ):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature, Value ) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features, set_batch_size )
    return None if batch_size is np.inf else batch_size
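# Hedged usage sketch (illustrative): plain value features leave the writer
# batch size unset, while image/audio/binary features would cap it.
if __name__ == "__main__":
    demo_features = Features({'''text''': Value('''string''' )} )
    print(get_writer_batch_size(demo_features ) )  # None -> keep the default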
class ParquetDatasetReader(AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , path_or_paths: NestedDataStructureLike[PathLike] , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    '''simple docstring'''
    def __init__( self , dataset: Dataset , path_or_buf: Union[PathLike, BinaryIO] , batch_size: Optional[int] = None , **parquet_writer_kwargs , ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj: BinaryIO , batch_size: int , **parquet_writer_kwargs ) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
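# Hedged sketch of the core write loop above, using pyarrow directly on a tiny
# in-memory table (illustrative only; writes demo.parquet in the working dir).
if __name__ == "__main__":
    import pyarrow as pa
    demo_table = pa.table({'''x''': [1, 2, 3]} )
    with pq.ParquetWriter('''demo.parquet''' , schema=demo_table.schema ) as demo_writer:
        demo_writer.write_table(demo_table )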
| 18 | 1 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str ) -> int:
    """Return the product of the digits in the string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution(n: str = N ) -> int:
    """Find the greatest product of thirteen adjacent digits in ``n``."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
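# Hedged cross-check sketch (illustrative): the straightforward sliding-window
# maximum over every 13-digit substring, for validating the greedy scan above.
def window_max_product(n: str = N , width: int = 13 ) -> int:
    return max(str_eval(n[i : i + width] ) for i in range(len(n ) - width + 1 ) )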
if __name__ == "__main__":
print(F"""{solution() = }""")
| 18 | '''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3 ) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data: rescale every value into the closed interval [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits ) for x in data]
def standardization(data: list, ndigits: int = 3 ) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data: shift to zero mean and scale to unit standard deviation
    return [round((x - mu) / sigma, ndigits ) for x in data]
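# Hedged usage demo for the two helpers above (illustrative data only).
if __name__ == "__main__":
    demo_data = [10.0, 20.0, 30.0, 40.0]
    print(normalization(demo_data ) )  # values rescaled into [0, 1]
    print(standardization(demo_data ) )  # zero mean, unit standard deviation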
| 18 | 1 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
def get_all_tweets(screen_name: str ) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret )
    auth.set_access_token(access_key, access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=2_00, max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F'''new_{screen_name}_tweets.csv''', '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 18 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str ):
    with open(path, '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ):
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main() -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''', _UpperCamelCase, _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['''train'''] = os.path.join(data_args.train_dir, '''**''' )
        if data_args.validation_dir is not None:
            data_files['''validation'''] = os.path.join(data_args.validation_dir, '''**''' )
        dataset = load_dataset(
            '''imagefolder''', data_files=data_files, cache_dir=model_args.cache_dir, task='''image-classification''', )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
        split = dataset['''train'''].train_test_split(data_args.train_val_split )
        dataset['''train'''] = split['''train''']
        dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['''train'''].features['''labels'''].names
    labelaid, idalabel = {}, {}
    for i, label in enumerate(labels ):
        labelaid[label] = str(i )
        idalabel[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), labelaid=labelaid, idalabel=idalabel, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase : Dict ):
A_ = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_UpperCamelCase : Any ):
A_ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            dataset['''train'''] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            dataset['''validation'''] = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('''train''', train_result.metrics )
        trainer.save_metrics('''train''', train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''', metrics )
        trainer.save_metrics('''eval''', metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : str = 'ClapFeatureExtractor'
__lowercase : Tuple = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = kwargs.pop('''sampling_rate''' , _SCREAMING_SNAKE_CASE )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
A_ = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if audios is not None:
A_ = self.feature_extractor(
_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
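# Hedged usage sketch (added for illustration, not part of the original file). It assumes the
# class above is exported as `ClapProcessor`, its name in the transformers source, and that the
# checkpoint name below exists on the Hub:
#   import numpy as np
#   processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
#   audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
#   inputs = processor(text=['a dog barking'], audios=audio, return_tensors='pt', sampling_rate=48_000)
#   # `inputs` carries the tokenizer fields plus the feature extractor's `input_features`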
| 18 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Dict:
A_ = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __A ( self ) -> str:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def __A ( self ) -> List[str]:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal( modela , modelb ) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> List[Any]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'git_vision_model'
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE="quick_gelu" , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = hidden_size
A_ = intermediate_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_channels
A_ = patch_size
A_ = image_size
A_ = initializer_range
A_ = attention_dropout
A_ = layer_norm_eps
A_ = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : List[Any] = 'git'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=101 , _SCREAMING_SNAKE_CASE=102 , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> str:
super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if vision_config is None:
A_ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
A_ = GitVisionConfig(**_SCREAMING_SNAKE_CASE )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = tie_word_embeddings
A_ = num_image_with_embedding
A_ = bos_token_id
A_ = eos_token_id
def __A ( self ) -> List[str]:
A_ = copy.deepcopy(self.__dict__ )
A_ = self.vision_config.to_dict()
A_ = self.__class__.model_type
return output
| 18 | '''simple docstring'''
def count_divisors( n : int ) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
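# Worked example (added for clarity): 28 = 2**2 * 7**1, so count_divisors(28)
# returns (2 + 1) * (1 + 1) = 6 — the divisors 1, 2, 4, 7, 14 and 28.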
def solution() -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 18 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier : str ) -> list:
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''', identifier )
    return [m.group(0 ) for m in matches]
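# Example (added for clarity): the regex above splits identifiers on case boundaries while
# keeping all-caps runs together, e.g. camel_case_split('TFBertModel') -> ['TF', 'Bert', 'Model'].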
def get_frameworks_table() -> pd.DataFrame:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('''Config''', '''''' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Look through all transformers objects (once) and find whether each model is supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'''model_type''': all_models}
    data['''pytorch'''] = [pt_models[t] for t in all_models]
    data['''tensorflow'''] = [tf_models[t] for t in all_models]
    data['''flax'''] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type has a preprocessing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = '''AutoTokenizer'''
    data['''processor'''] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table : dict ) -> dict:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
        auto_classes = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping ).values():
                if isinstance(name, str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token : str, commit_sha : str ) -> None:
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        '''huggingface/transformers-metadata''', '''pipeline_tags.json''', repo_type='''dataset''', token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid nondeterministic updates creating false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(tmp_dir, '''pipeline_tags.json''' ) )
        if commit_sha is not None:
            commit_message = (
                F'''Update with commit {commit_sha}\n\nSee: '''
                F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
            )
        else:
            commit_message = '''Update'''
        upload_folder(
            repo_id='''huggingface/transformers-metadata''', folder_path=tmp_dir, repo_type='''dataset''', token=token, commit_message=commit_message, )
def check_pipeline_tags() -> None:
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['''pt''']
            if isinstance(model, (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ''', '''.join(missing )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 18 | '''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = backbone_out_indices
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = backbone_featmap_shape
A_ = scope
A_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def __A ( self ) -> Optional[Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
A_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = self.num_labels
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = self.num_labels
A_ = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ) -> Optional[int]:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Tuple = False
__lowercase : List[Any] = False
def __A ( self ) -> Tuple:
A_ = DPTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def __A ( self ) -> Any:
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[Any]:
A_ = tempfile.mkdtemp()
A_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
A_ = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> Tuple:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> Union[str, Any]:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __A ( self ) -> List[str]:
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = self.get_image_processor()
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
A_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
A_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Union[str, Any]:
A_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
A_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = self.prepare_image_inputs()
A_ = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> Optional[Any]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
A_ = processor(text=_SCREAMING_SNAKE_CASE )
A_ = tokenizer(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> int:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
A_ = self.prepare_image_inputs()
A_ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __A ( self ) -> Optional[Any]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(_SCREAMING_SNAKE_CASE )
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
A_ = self.prepare_image_inputs()
A_ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 18 | '''simple docstring'''
import math
def malus_law( initial_intensity : float, angle : float ) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
    # handling of values out of the allowed range
    if angle < 0 or angle > 3_60:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
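# Worked example (added for clarity): cos(60 degrees) is 0.5, so a polarizer at 60 degrees
# transmits a quarter of the light — malus_law(100, 60) returns 25.0 (up to float rounding).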
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 18 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Tuple = 'openai/whisper-base'
__lowercase : Optional[Any] = (
'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
'transcribed text.'
)
__lowercase : Any = 'transcriber'
__lowercase : Optional[Any] = WhisperProcessor
__lowercase : int = WhisperForConditionalGeneration
__lowercase : int = ['audio']
__lowercase : List[str] = ['text']
    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors='''pt''' ).input_features
    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
| 18 | '''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 18 | 1 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    '''simple docstring'''
    def __init__( self , img , dst_width , dst_height ):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process( self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self , x ):
        return int(self.ratio_x * x )
    def get_y( self , y ):
        return int(self.ratio_y * y )
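# Worked example (added for clarity): downscaling 800x600 to 400x300 gives
# ratio_x = ratio_y = 2.0, so destination pixel (row 20, col 10) is copied from
# source pixel (row 40, col 20) — plain nearest-neighbour sampling with no blending.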
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 18 | '''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path, map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV into separate Q, K, V tensors
            q_name = key.replace('''.qkv_proj.''', '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''', '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''', '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # In `SequenceParallelTransformerBlock`, the QKV weight is stored in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value, depth // 3, dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
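# Shape sanity check (added for clarity): for a hidden size of 768, a fused qkv_proj weight
# has shape (3 * 768, 768) = (2304, 768); the split above yields three (768, 768) matrices
# that are re-registered under the q_proj, k_proj and v_proj keys.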
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ) -> None:
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 18 | 1 |
'''simple docstring'''
def combination_sum_iv( n : int, array : list[int], target : int ) -> int:
    def count_of_possible_combinations(target : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
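# Note (added for clarity): this counts *ordered* sequences, so for array = [1, 2, 5] and
# target = 3 the answer is 3 — (1, 1, 1), (1, 2) and (2, 1) are all counted separately.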
def combination_sum_iv_dp_array( n : int, array : list[int], target : int ) -> int:
    def count_of_possible_combinations_with_dp_array(
        target : int, dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array )
def combination_sum_iv_bottom_up( n : int, array : list[int], target : int ) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
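# Worked example (added for clarity) for array = [1, 2, 5] and target = 5:
# dp_array fills as [1, 1, 2, 3, 5, 9], since dp[5] = dp[4] + dp[3] + dp[0] = 5 + 3 + 1 = 9,
# which is exactly what the demo below prints.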
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 18 | '''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Any = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ) -> int:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
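    # Example (added for clarity): for a pair with 2 and 3 real tokens, the layout
    # [CLS] a a [SEP] b b b [SEP] yields token type ids [0, 0, 0, 0, 1, 1, 1, 1].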
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 18 | 1 |
'''simple docstring'''
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 18 | '''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )
    parser.add_argument(
        '''--config_file''', default=None, help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
        '''PyTorch XPU available''': str(pt_xpu_available ),
        '''PyTorch NPU available''': str(pt_npu_available ),
        '''System RAM''': F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
    print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    accelerate_config_str = (
        '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config, dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info['''`Accelerate` configs'''] = accelerate_config
    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 18 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
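# Note (added for clarity): with the `_LazyModule` pattern above, submodules are only imported
# on first attribute access — e.g. `from transformers.models.beit import BeitConfig` triggers
# the import of `configuration_beit` at lookup time, keeping `import transformers` fast.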
| 18 | '''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = mask_ratio
A_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
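        # Worked example (added for clarity) with this tester's defaults: image_size=30
        # and patch_size=2 give num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6
        # keeps seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 positions.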
def __A ( self ) -> Union[str, Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = (self.image_size // self.patch_size) ** 2
A_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ = 1
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self ) -> int:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowercase : List[Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__lowercase : Union[str, Any] = False
__lowercase : List[Any] = False
__lowercase : List[str] = False
__lowercase : List[str] = False
def __A ( self ) -> Any:
A_ = ViTMAEModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __A ( self ) -> int:
pass
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Union[str, Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
# make masks reproducible
np.random.seed(2 )
A_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b)] = 0
                max_diff = np.amax(np.abs(out_a - out_b))
                self.assertLessEqual(max_diff, 1E-5)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Dict:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __A ( self ) -> str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Union[str, Any]:
pass
@slow
def __A ( self ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Dict:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_SCREAMING_SNAKE_CASE )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ = ViTMAEConfig()
A_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
A_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
__snake_case : Tuple = input('Enter numbers separated by a comma:\n').strip()
__snake_case : Tuple = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
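# Worked example (an added note, not part of the original script):
#   selection_sort([3, 1, 4, 1, 5])  ->  [1, 1, 3, 4, 5]
# Pass i swaps the minimum of collection[i:] into slot i, so the sort runs in
# O(n^2) comparisons but performs at most n - 1 swaps, all in place.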
| 18 | '''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[Any] = 'xlm-prophetnet'
__lowercase : Optional[int] = ['past_key_values']
__lowercase : int = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 3_0522 , _SCREAMING_SNAKE_CASE = 1024 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 2 , **_SCREAMING_SNAKE_CASE , ) -> int:
A_ = vocab_size
A_ = hidden_size
A_ = encoder_ffn_dim
A_ = num_encoder_layers
A_ = num_encoder_attention_heads
A_ = decoder_ffn_dim
A_ = num_decoder_layers
A_ = num_decoder_attention_heads
A_ = max_position_embeddings
A_ = init_std # Normal(0, this parameter)
A_ = activation_function
# parameters for xlmprophetnet
A_ = ngram
A_ = num_buckets
A_ = relative_max_distance
A_ = disable_ngram_loss
A_ = eps
# 3 Types of Dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = dropout
A_ = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , add_cross_attention=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers(self, value):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
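# Illustrative usage (an added sketch, assuming the class above is exported under
# its upstream name XLMProphetNetConfig): num_hidden_layers is derived, read-only state.
#
#     config = XLMProphetNetConfig(num_encoder_layers=3, num_decoder_layers=3)
#     config.num_hidden_layers       # -> 6, the sum of encoder and decoder layers
#     config.num_hidden_layers = 4   # raises NotImplementedError; set the two counts instead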
| 18 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str) -> None:
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(F'''Generating {path}''')
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | '''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''')
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
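# Worked example (added, not in the original file): with a 10% discount rate and
# cash flows [-1000, 500, 500, 500] indexed from period 0,
#     PV = -1000 / 1.1**0 + 500 / 1.1**1 + 500 / 1.1**2 + 500 / 1.1**3
#        = -1000 + 454.55 + 413.22 + 375.66
# so present_value(0.1, [-1000, 500, 500, 500]) returns 243.43.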
| 18 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[int] = 'rwkv'
__lowercase : Tuple = {'max_position_embeddings': 'context_length'}
def __init__( self , _SCREAMING_SNAKE_CASE=5_0277 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> int:
A_ = vocab_size
A_ = context_length
A_ = hidden_size
A_ = num_hidden_layers
A_ = attention_hidden_size if attention_hidden_size is not None else hidden_size
A_ = intermediate_size if intermediate_size is not None else 4 * hidden_size
A_ = layer_norm_epsilon
A_ = rescale_every
A_ = use_cache
A_ = bos_token_id
A_ = eos_token_id
super().__init__(
tie_word_embeddings=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
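# Illustrative note on the fallback defaults above (an added sketch, assuming the
# class is exported under its upstream name RwkvConfig): passing None for the two
# derived sizes makes them follow hidden_size.
#
#     cfg = RwkvConfig(hidden_size=512)
#     cfg.attention_hidden_size  # -> 512,  falls back to hidden_size
#     cfg.intermediate_size      # -> 2048, falls back to 4 * hidden_size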
| 18 | '''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_00_00_00) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('''b''')[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
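# Worked example (added note): 585 is double-base palindromic, since '585' reads the
# same both ways and bin(585) == '0b1001001001', whose digit part '1001001001' is
# also a palindrome; solution(1_000) therefore counts 585 into the total.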
| 18 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__lowercase : ClassVar[Features] = Features({'image': Image()} )
__lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
__lowercase : str = "image"
__lowercase : str = "labels"
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , _SCREAMING_SNAKE_CASE ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
A_ = copy.deepcopy(self )
A_ = self.label_schema.copy()
A_ = features[self.label_column]
A_ = label_schema
return task_template
@property
def __A ( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
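# Illustrative usage (an added sketch; upstream the first method above is named
# align_with_features): aligning the template with a dataset's real features swaps
# the bare ClassLabel placeholder for the dataset's own label feature.
#
#     features = Features({'image': Image(), 'labels': ClassLabel(names=['cat', 'dog'])})
#     task = ImageClassification(label_column='labels').align_with_features(features)
#     task.label_schema['labels']  # -> ClassLabel(names=['cat', 'dog'])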
| 18 | '''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str) -> None:
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(F'''Generating {path}''')
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _UpperCAmelCase ( ) -> Dict:
A_ = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=_UpperCamelCase )
A_ = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=_UpperCamelCase )
env_command_parser(subparsers=_UpperCamelCase )
launch_command_parser(subparsers=_UpperCamelCase )
tpu_command_parser(subparsers=_UpperCamelCase )
test_command_parser(subparsers=_UpperCamelCase )
# Let's go
A_ = parser.parse_args()
if not hasattr(_UpperCamelCase, '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
if __name__ == "__main__":
main()
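# The dispatch above works because each *_command_parser helper registers a
# subcommand and attaches its handler with set_defaults(func=...). A minimal
# sketch of that pattern (illustrative; the real helpers take more arguments):
#
#     def env_command_parser(subparsers=None):
#         parser = subparsers.add_parser('env')
#         parser.add_argument('--config_file', default=None)
#         parser.set_defaults(func=env_command)  # later invoked via args.func(args)
#         return parser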
| 18 | '''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
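# Why len(cuts) - 1 (added note): dfs returns each subtree's size and records every
# node whose subtree is even-sized. The root's full tree (10 nodes) is always even,
# so the root lands in cuts too, but there is no edge above it to remove; the -1
# discounts it. For the sample edge list above the program prints 2.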
| 18 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
A_ = BlipProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).tokenizer
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor
def __A ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> str:
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ) -> Optional[Any]:
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = self.prepare_image_inputs()
A_ = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> List[Any]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
A_ = processor(text=_SCREAMING_SNAKE_CASE )
A_ = tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> List[str]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
A_ = self.prepare_image_inputs()
A_ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __A ( self ) -> Optional[int]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(_SCREAMING_SNAKE_CASE )
A_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Dict:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ = '''lower newer'''
A_ = self.prepare_image_inputs()
A_ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 18 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[int] = 'mgp-str'
def __init__( self , _SCREAMING_SNAKE_CASE=[32, 128] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=27 , _SCREAMING_SNAKE_CASE=38 , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = max_token_length
A_ = num_character_labels
A_ = num_bpe_labels
A_ = num_wordpiece_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = mlp_ratio
A_ = distilled
A_ = layer_norm_eps
A_ = drop_rate
A_ = qkv_bias
A_ = attn_drop_rate
A_ = drop_path_rate
A_ = output_aa_attentions
A_ = initializer_range
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ) -> Optional[int]:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def __A ( self ) -> int:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = TFViTModel(config=_SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A_ = self.image_size // 2
A_ = pixel_values[:, :, :image_size, :image_size]
A_ = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
A_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
A_ = self.type_sequence_label_size
A_ = TFViTForImageClassification(_SCREAMING_SNAKE_CASE )
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A_ = self.image_size // 2
A_ = pixel_values[:, :, :image_size, :image_size]
A_ = model(_SCREAMING_SNAKE_CASE , interpolate_pos_encoding=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ = 1
A_ = TFViTForImageClassification(_SCREAMING_SNAKE_CASE )
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> str:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__lowercase : List[Any] = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__lowercase : int = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def __A ( self ) -> List[Any]:
A_ = TFViTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __A ( self ) -> Optional[Any]:
pass
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __A ( self ) -> List[str]:
A_ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Optional[Any]:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[Any]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __A ( self ) -> Optional[Any]:
A_ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
# forward pass
A_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
A_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
| 18 | '''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | 1 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A_ = {}
def __A ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
A_ = super().add_tokens(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
''' `placeholder_token` that is not already in the tokenizer.''' )
def __A ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 , **_SCREAMING_SNAKE_CASE ) -> Any:
A_ = []
if num_vec_per_token == 1:
self.try_adding_tokens(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
output.append(_SCREAMING_SNAKE_CASE )
else:
A_ = []
for i in range(_SCREAMING_SNAKE_CASE ):
A_ = placeholder_token + F'''_{i}'''
self.try_adding_tokens(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
output.append(_SCREAMING_SNAKE_CASE )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}; keep placeholder tokens independent.''' )
A_ = output
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 ) -> Dict:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_SCREAMING_SNAKE_CASE ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A_ = self.token_map[placeholder_token]
A_ = tokens[: 1 + int(len(_SCREAMING_SNAKE_CASE ) * prop_tokens_to_load )]
if vector_shuffle:
A_ = copy.copy(_SCREAMING_SNAKE_CASE )
random.shuffle(_SCREAMING_SNAKE_CASE )
A_ = text.replace(_SCREAMING_SNAKE_CASE , ''' '''.join(_SCREAMING_SNAKE_CASE ) )
return text
def __call__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 , **_SCREAMING_SNAKE_CASE ) -> Dict:
return super().__call__(
self.replace_placeholder_tokens_in_text(
_SCREAMING_SNAKE_CASE , vector_shuffle=_SCREAMING_SNAKE_CASE , prop_tokens_to_load=_SCREAMING_SNAKE_CASE ) , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 , **_SCREAMING_SNAKE_CASE ) -> Any:
return super().encode(
self.replace_placeholder_tokens_in_text(
_SCREAMING_SNAKE_CASE , vector_shuffle=_SCREAMING_SNAKE_CASE , prop_tokens_to_load=_SCREAMING_SNAKE_CASE ) , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
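# Illustrative usage (an added sketch of the multi-vector textual-inversion
# tokenizer pattern; upstream the second method above is add_placeholder_tokens):
# registering a placeholder with num_vec_per_token=3 adds '<cat>_0', '<cat>_1' and
# '<cat>_2' to the vocabulary and to token_map, so that
# replace_placeholder_tokens_in_text('a photo of <cat>') expands the placeholder to
# 'a photo of <cat>_0 <cat>_1 <cat>_2' before the normal CLIP tokenization runs.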
| 18 | '''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _UpperCAmelCase ( ) -> Dict:
A_ = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=_UpperCamelCase )
A_ = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=_UpperCamelCase )
env_command_parser(subparsers=_UpperCamelCase )
launch_command_parser(subparsers=_UpperCamelCase )
tpu_command_parser(subparsers=_UpperCamelCase )
test_command_parser(subparsers=_UpperCamelCase )
# Let's go
A_ = parser.parse_args()
if not hasattr(_UpperCamelCase, '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)

        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
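# Complexity note (added): every round either returns or shrinks the interval to
# roughly a third of its length, so both variants run in O(log3 n) comparisons,
# handing off to lin_search once fewer than `precision` elements remain. For
# example, searching for 7 in [1, 3, 5, 7, 9, 11] returns index 3 from either one.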
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F"""Iterative search: {target} found at positions: {result1}""")
        print(F"""Recursive search: {target} found at positions: {result2}""")
else:
print('Not found')
| 18 | '''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__snake_case : Any = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
__snake_case : Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__snake_case : Optional[int] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __A ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
if rouge_types is None:
A_ = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
A_ = rouge_scorer.RougeScorer(rouge_types=_SCREAMING_SNAKE_CASE , use_stemmer=_SCREAMING_SNAKE_CASE )
if use_aggregator:
A_ = scoring.BootstrapAggregator()
else:
A_ = []
for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = scorer.score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if use_aggregator:
aggregator.add_scores(_SCREAMING_SNAKE_CASE )
else:
scores.append(_SCREAMING_SNAKE_CASE )
if use_aggregator:
A_ = aggregator.aggregate()
else:
A_ = {}
for key in scores[0]:
A_ = [score[key] for score in scores]
return result
| 18 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__snake_case : Optional[Any] = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
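# Mapping intuition (added note): t = atan2(sigma, alpha) * 2 / pi normalizes the
# (alpha, sigma) signal/noise pair onto [0, 1], so t == 0 is pure signal
# (alpha=1, sigma=0) and t == 1 is pure noise (alpha=0, sigma=1).
# get_crash_schedule first warps a uniform t through the "crash" sigma curve,
# then maps the resulting pair back to a timestep via alpha_sigma_to_t.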
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
pass
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
super().__init__()
A_ = DiffusionAttnUnetaD(_SCREAMING_SNAKE_CASE , n_attn_layers=4 )
A_ = deepcopy(self.diffusion )
A_ = torch.quasirandom.SobolEngine(1 , scramble=_SCREAMING_SNAKE_CASE )
def download(model_name):
    url = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__snake_case : Optional[int] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__snake_case : Union[str, Any] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__snake_case : Optional[int] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__snake_case : int = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__snake_case : List[Any] = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__snake_case : Optional[int] = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def _UpperCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
if name.startswith('''skip''' ):
return name.replace('''skip''', RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6], RES_CONV_MAP[name[:6]] )
def _UpperCAmelCase ( _UpperCamelCase : Optional[Any] ) -> List[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_UpperCamelCase ) and not isinstance(_UpperCamelCase, _UpperCamelCase ):
return name.replace(_UpperCamelCase, _UpperCamelCase )
elif name.startswith(_UpperCamelCase ):
return [name.replace(_UpperCamelCase, _UpperCamelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def _UpperCAmelCase ( _UpperCamelCase : List[Any], _UpperCamelCase : Optional[int]=13 ) -> List[str]:
A_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''', '''time_proj''' )
A_ = 0
if string.startswith('''net.3.''' ):
depth += 1
A_ = string[6:]
elif string.startswith('''net.''' ):
A_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
A_ = string[7:]
if string.startswith('''main.''' ):
A_ = string[5:]
# mid block
if string[:2].isdigit():
A_ = string[:2]
A_ = string[2:]
else:
A_ = string[0]
A_ = string[1:]
if depth == max_depth:
A_ = MID_NUM_TO_LAYER[layer_num]
A_ = '''mid_block'''
elif depth > 0 and int(_UpperCamelCase ) < 7:
A_ = DOWN_NUM_TO_LAYER[layer_num]
A_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(_UpperCamelCase ) > 7:
A_ = UP_NUM_TO_LAYER[layer_num]
A_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
A_ = DEPTH_0_TO_LAYER[layer_num]
A_ = F'''up_blocks.{max_depth - 1}''' if int(_UpperCamelCase ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
A_ = string_left[1:]
if "resnets" in new_layer:
A_ = convert_resconv_naming(_UpperCamelCase )
elif "attentions" in new_layer:
A_ = convert_attn_naming(_UpperCamelCase )
A_ = new_string_left
if not isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
A_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
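# The helpers above rewrite original checkpoint keys into diffusers-style keys.
# A self-contained toy sketch of the same prefix-rewriting idea (the mapping is
# illustrative only, not the real layer map used by this script):
_TOY_KEY_MAP = {"enc.0": "down_blocks.0", "dec.0": "up_blocks.0"}
def _toy_rename(key):
    for old, new in _TOY_KEY_MAP.items():
        if key.startswith(old):
            return new + key[len(old):]
    return key
assert _toy_rename("enc.0.conv.weight") == "down_blocks.0.conv.weight"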
def _UpperCAmelCase ( _UpperCamelCase : List[str] ) -> List[str]:
A_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
            # up- and downsample layers don't have trainable weights
continue
A_ = rename(_UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = transform_conv_attns(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
else:
A_ = v
return new_state_dict
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Optional[int], _UpperCamelCase : str ) -> Dict:
if len(_UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
A_ = v[:, :, 0]
else:
# bias
A_ = v
else:
# qkv matrices
A_ = v.shape[0]
A_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
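# Hedged illustration of the Conv1d -> Linear weight conversion performed above:
# a kernel-size-1 Conv1d weight of shape (out, in, 1) becomes a Linear weight of
# shape (out, in) by dropping the trailing kernel axis.
_conv_w = torch.randn(4, 8, 1)
assert _conv_w[:, :, 0].shape == (4, 8)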
def _UpperCAmelCase ( _UpperCamelCase : int ) -> Optional[Any]:
A_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
A_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
A_ = download(_UpperCamelCase )
A_ = MODELS_MAP[model_name]['''sample_rate''']
A_ = MODELS_MAP[model_name]['''sample_size''']
A_ = Object()
A_ = sample_size
A_ = sample_rate
A_ = 0
A_ = UNetaDModel(sample_size=_UpperCamelCase, sample_rate=_UpperCamelCase )
A_ = diffusers_model.state_dict()
A_ = DiffusionUncond(_UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path, map_location=_UpperCamelCase )['''state_dict'''] )
A_ = orig_model.diffusion_ema.eval()
A_ = orig_model.state_dict()
A_ = rename_orig_weights(_UpperCamelCase )
A_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_UpperCamelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(_UpperCamelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
A_ = value.squeeze()
A_ = value
diffusers_model.load_state_dict(_UpperCamelCase )
A_ = 1_00
A_ = 33
A_ = IPNDMScheduler(num_train_timesteps=_UpperCamelCase )
A_ = torch.manual_seed(_UpperCamelCase )
A_ = torch.randn([1, 2, config.sample_size], generator=_UpperCamelCase ).to(_UpperCamelCase )
A_ = torch.linspace(1, 0, steps + 1, device=_UpperCamelCase )[:-1]
A_ = get_crash_schedule(_UpperCamelCase )
A_ = DanceDiffusionPipeline(unet=_UpperCamelCase, scheduler=_UpperCamelCase )
A_ = torch.manual_seed(33 )
A_ = pipe(num_inference_steps=_UpperCamelCase, generator=_UpperCamelCase ).audios
A_ = sampling.iplms_sample(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, {} )
A_ = generated.clamp(-1, 1 )
A_ = (generated - audio).abs().sum()
A_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''', _UpperCamelCase )
print('''Diff max''', _UpperCamelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__snake_case : List[Any] = parser.parse_args()
main(args)
| 18 | '''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase ( _UpperCamelCase : str ) -> int:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
super().__init__()
A_ = module
A_ = nn.Sequential(
nn.Linear(module.in_features , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) , nn.Linear(_SCREAMING_SNAKE_CASE , module.out_features , bias=_SCREAMING_SNAKE_CASE ) , )
A_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_SCREAMING_SNAKE_CASE )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __A ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.module(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) + self.adapter(_SCREAMING_SNAKE_CASE )
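# Note on the adapter above: it implements the LoRA pattern
# output = base(x) + B(A(x)), where A is initialised with small normal noise and
# B with zeros, so at the start of training the wrapped module behaves exactly
# like the frozen base layer.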
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = 'bigscience/bloom-1b7'
# Constant values
__lowercase : str = 2.109659552692574
__lowercase : int = 'Hello my name is'
__lowercase : Optional[Any] = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__lowercase : Optional[Any] = 10
def __A ( self ) -> List[str]:
# Models and tokenizer
A_ = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[Any]:
super().setUp()
# Models and tokenizer
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> List[str]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
A_ = self.model_abit.config
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''quantization_config''' ) )
A_ = config.to_dict()
A_ = config.to_diff_dict()
A_ = config.to_json_string()
def __A ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
A_ = self.model_fpaa.get_memory_footprint()
A_ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ) -> Union[str, Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ) -> Optional[int]:
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
A_ = BitsAndBytesConfig()
A_ = True
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Tuple:
with self.assertRaises(_SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = BitsAndBytesConfig()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __A ( self ) -> Dict:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            # Tries with a cast via `float()`
self.model_abit.float()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            # Tries with a cast via `half()`
self.model_abit.half()
# Test if we did not break anything
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_fpaa.to(torch.floataa )
A_ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
A_ = self.model_fpaa.half()
# Check this does not throw an error
A_ = self.model_fpaa.float()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Optional[Any]:
A_ = '''t5-small'''
A_ = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
A_ = AutoTokenizer.from_pretrained(cls.model_name )
A_ = '''Translate in German: Hello, my dog is cute'''
def __A ( self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
from transformers import TaForConditionalGeneration
A_ = TaForConditionalGeneration._keep_in_fpaa_modules
A_ = None
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
A_ = modules
def __A ( self ) -> Dict:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> int:
super().setUp()
# model_name
A_ = '''bigscience/bloom-560m'''
A_ = '''t5-small'''
# Different types of model
A_ = AutoModel.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Sequence classification model
A_ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# CausalLM model
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Seq2seq model
A_ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> Union[str, Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> List[str]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
super().setUp()
def __A ( self ) -> List[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
A_ = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A_ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
super().setUp()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
A_ = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> str:
A_ = '''facebook/opt-350m'''
super().setUp()
def __A ( self ) -> Optional[int]:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A_ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A_ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_SCREAMING_SNAKE_CASE ) ):
A_ = LoRALayer(module.q_proj , rank=16 )
A_ = LoRALayer(module.k_proj , rank=16 )
A_ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A_ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A_ = model.forward(**_SCREAMING_SNAKE_CASE )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_SCREAMING_SNAKE_CASE , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'gpt2-xl'
__lowercase : List[Any] = 3.3191854854152187
| 18 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__snake_case : str = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Dict, _UpperCamelCase : Optional[int]=None, _UpperCamelCase : int=None, _UpperCamelCase : Union[str, Any]=None, _UpperCamelCase : List[str]=None, _UpperCamelCase : int=None, _UpperCamelCase : Dict=None, ) -> str:
if attention_mask is None:
A_ = np.where(input_ids != config.pad_token_id, 1, 0 )
if decoder_attention_mask is None:
A_ = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
if head_mask is None:
A_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
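# Hedged illustration of the default masking rule used above: positions holding
# the pad token id are zeroed out of the attention mask, everything else attends.
# e.g. with pad_token_id == 1:
#     np.where(np.array([[5, 7, 1]]) != 1, 1, 0)  ->  array([[1, 1, 0]])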
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0.02 , ) -> Dict:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = eos_token_id
A_ = pad_token_id
A_ = bos_token_id
A_ = initializer_range
def __A ( self ) -> List[Any]:
A_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
A_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
A_ = shift_tokens_right(_SCREAMING_SNAKE_CASE , 1 , 2 )
A_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_SCREAMING_SNAKE_CASE , )
A_ = prepare_blenderbot_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, inputs_dict
def __A ( self ) -> int:
A_ ,A_ = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = model.encode(inputs_dict['''input_ids'''] )
A_ ,A_ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
A_ = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ = model.decode(
decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model.decode(
decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
A_ = 20
A_ = model_class_name(_SCREAMING_SNAKE_CASE )
A_ = model.encode(inputs_dict['''input_ids'''] )
A_ ,A_ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
A_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A_ = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ = model.decode(
decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model.decode(
decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
A_ = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = 99
def __A ( self ) -> int:
A_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
A_ = input_ids.shape[0]
A_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __A ( self ) -> Any:
A_ ,A_ ,A_ = self._get_config_and_data()
A_ = FlaxBlenderbotSmallForConditionalGeneration(_SCREAMING_SNAKE_CASE )
A_ = lm_model(input_ids=_SCREAMING_SNAKE_CASE )
A_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
A_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
A_ = FlaxBlenderbotSmallForConditionalGeneration(_SCREAMING_SNAKE_CASE )
A_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
A_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
A_ = lm_model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE )
A_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
A_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
A_ = shift_tokens_right(_SCREAMING_SNAKE_CASE , 1 , 2 )
A_ = np.equal(_SCREAMING_SNAKE_CASE , 1 ).astype(np.floataa ).sum()
A_ = np.equal(_SCREAMING_SNAKE_CASE , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_SCREAMING_SNAKE_CASE , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
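# Note on the assertions above: shifting right prepends the decoder start token
# (id 2) and pushes the last column out, so in this example the trailing pad of
# the first row disappears and the total pad count drops by exactly one.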
@require_flax
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[int] = True
__lowercase : str = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowercase : Tuple = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __A ( self ) -> int:
A_ = FlaxBlenderbotSmallModelTester(self )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def encode_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
return model.encode(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
with self.subTest('''JIT Enabled''' ):
A_ = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
A_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return model.decode(
decoder_input_ids=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , encoder_outputs=_SCREAMING_SNAKE_CASE , )
with self.subTest('''JIT Enabled''' ):
A_ = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ) -> Any:
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # the model expects an eos token in input_ids
A_ = np.ones((1, 1) ) * model.config.eos_token_id
A_ = model(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | '''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _UpperCAmelCase ( _UpperCamelCase : Features ) -> Optional[int]:
A_ = np.inf
def set_batch_size(_UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_UpperCamelCase, _UpperCamelCase ) and feature.dtype == "binary":
A_ = min(_UpperCamelCase, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_UpperCamelCase, _UpperCamelCase )
return None if batch_size is np.inf else batch_size
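# Hedged usage note for the helper above: it walks the (possibly nested) feature
# tree and caps the Parquet writer batch size for storage-heavy column types
# (image, audio, binary). A features object with only plain value columns, e.g.
# Features({"label": Value("int64")}), matches nothing, so the helper returns None.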
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ = path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
A_ = Parquet(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self ) -> str:
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Dict:
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self ) -> int:
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
A_ = self._write(file_obj=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
return written
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
A_ = 0
A_ = parquet_writer_kwargs.pop('''path_or_buf''' , _SCREAMING_SNAKE_CASE )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _SCREAMING_SNAKE_CASE ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
A_ = query_table(
table=self.dataset._data , key=slice(_SCREAMING_SNAKE_CASE , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_SCREAMING_SNAKE_CASE )
written += batch.nbytes
writer.close()
return written
| 18 | 1 |
'''simple docstring'''
import os
def _UpperCAmelCase ( _UpperCamelCase : str = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(_UpperCamelCase ), _UpperCamelCase ) ) as in_file:
A_ = in_file.read()
A_ = [[int(_UpperCamelCase ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
A_ = [[0 for cell in row] for row in grid]
A_ = len(grid[0] )
A_ = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
A_ = grid[0][0]
for i in range(1, _UpperCamelCase ):
A_ = grid[0][i] + dp[0][i - 1]
for i in range(1, _UpperCamelCase ):
A_ = grid[i][0] + dp[i - 1][0]
for i in range(1, _UpperCamelCase ):
for j in range(1, _UpperCamelCase ):
A_ = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
return dp[-1][-1]
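# Worked example of the DP above (illustrative, not from matrix.txt):
#     grid = [[1, 2],
#             [3, 4]]
# dp fills to [[1, 3], [4, 7]], so the minimal right/down path sum is 7 (1+2+4).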
if __name__ == "__main__":
print(F"""{solution() = }""")
| 18 | '''simple docstring'''
from statistics import mean, stdev
def _UpperCAmelCase ( _UpperCamelCase : list, _UpperCamelCase : int = 3 ) -> list:
A_ = min(_UpperCamelCase )
A_ = max(_UpperCamelCase )
# normalize data
return [round((x - x_min) / (x_max - x_min), _UpperCamelCase ) for x in data]
def _UpperCAmelCase ( _UpperCamelCase : list, _UpperCamelCase : int = 3 ) -> list:
A_ = mean(_UpperCamelCase )
A_ = stdev(_UpperCamelCase )
# standardize data
return [round((x - mu) / (sigma), _UpperCamelCase ) for x in data]
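# Hedged usage sketch of the two helpers above (values checked by hand):
# min-max normalization of [1, 2, 3]   -> [0.0, 0.5, 1.0]
# z-score standardization of [1, 2, 3] -> [-1.0, 0.0, 1.0]
#   (mean 2, sample standard deviation 1, each rounded to 3 decimals)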
| 18 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Union[str, Any] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
__snake_case : Any = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
__snake_case : List[Any] = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Tuple = VOCAB_FILES_NAMES
__lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : Tuple = RoFormerTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , _SCREAMING_SNAKE_CASE ) != strip_accents
):
A_ = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
A_ = do_lower_case
A_ = strip_accents
A_ = pre_tok_class(**_SCREAMING_SNAKE_CASE )
A_ = do_lower_case
def __getstate__( self ) -> int:
A_ = self.__dict__.copy()
A_ = BertPreTokenizer()
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = d
A_ = self.__dict__['''_tokenizer'''].get_vocab()
A_ = PreTokenizer.custom(JiebaPreTokenizer(_SCREAMING_SNAKE_CASE ) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[Any]:
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
A_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Any:
A_ = BertPreTokenizer()
return super().save_pretrained(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 18 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__snake_case : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _UpperCAmelCase ( _UpperCamelCase : str ) -> int:
with open(_UpperCamelCase, '''rb''' ) as f:
A_ = Image.open(_UpperCamelCase )
return im.convert('''RGB''' )
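# Note on the loader above: converting to RGB normalises grayscale, palette and
# RGBA inputs to three channels, which the torchvision transforms below expect.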
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _UpperCAmelCase ( _UpperCamelCase : str ) -> Dict:
A_ = torch.stack([example['''pixel_values'''] for example in examples] )
A_ = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _UpperCAmelCase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ ,A_ ,A_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ ,A_ ,A_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''', _UpperCamelCase, _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
A_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
A_ = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
else:
A_ = {}
if data_args.train_dir is not None:
A_ = os.path.join(data_args.train_dir, '''**''' )
if data_args.validation_dir is not None:
A_ = os.path.join(data_args.validation_dir, '''**''' )
A_ = load_dataset(
'''imagefolder''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir, task='''image-classification''', )
# If we don't have a validation split, split off a percentage of train as validation.
A_ = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _UpperCamelCase ) and data_args.train_val_split > 0.0:
A_ = dataset['''train'''].train_test_split(data_args.train_val_split )
A_ = split['''train''']
A_ = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A_ = dataset['''train'''].features['''labels'''].names
A_ ,A_ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
A_ = str(_UpperCamelCase )
A_ = label
# Load the accuracy metric from the datasets package
A_ = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
A_ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(_UpperCamelCase ), labelaid=_UpperCamelCase, idalabel=_UpperCamelCase, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A_ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
A_ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase : Dict ):
A_ = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_UpperCamelCase : Any ):
A_ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
A_ = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
A_ = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCamelCase )
    # Initialize our trainer
A_ = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=_UpperCamelCase, tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, )
# Training
if training_args.do_train:
A_ = None
if training_args.resume_from_checkpoint is not None:
A_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ = last_checkpoint
A_ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ = trainer.evaluate()
trainer.log_metrics('''eval''', _UpperCamelCase )
trainer.save_metrics('''eval''', _UpperCamelCase )
# Write model card and (optionally) push to hub
A_ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 18 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
    A_ = 3
    A_ = 0
    while a < n:
        # a single check suffices here: every multiple of 15 is already a multiple of 3
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
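# For the default n = 10_00 this sums the multiples of 3 or 5 below 1000,
# which evaluates to 233168 (the classic Project Euler #1 answer).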
if __name__ == "__main__":
print(F"""{solution() = }""")
| 18 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Dict:
A_ = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def __A ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __A ( self ) -> str:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def __A ( self ) -> List[str]:
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple ) -> Dict:
A_ = True
A_ = flatten_dict(modela.params )
A_ = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
A_ = False
return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> List[Any]:
A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
A_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def __A ( self ) -> Dict:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = '''bert'''
A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
import os
def _UpperCAmelCase ( ) -> Optional[int]:
    with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
A_ = str(file.readlines()[0] )
A_ = names.replace('''"''', '''''' ).split(''',''' )
names.sort()
A_ = 0
A_ = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
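        # reset the per-name score before moving on to the next name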
A_ = 0
return total_score
if __name__ == "__main__":
print(solution())
| 18 | '''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Dict:
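    # number of divisors from the prime factorization: d(n) = prod(multiplicity_i + 1)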
A_ = 1
A_ = 2
while i * i <= n:
A_ = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _UpperCAmelCase ( ) -> Optional[int]:
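    # walk the triangle numbers 1, 3, 6, 10, ... and stop at the first with more than 500 divisors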
A_ = 1
A_ = 1
while True:
i += 1
t_num += i
if count_divisors(_UpperCamelCase ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
| 18 | 1 |
'''simple docstring'''
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
A_ = None
A_ = None
A_ = graph
self._normalize_graph(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = len(_SCREAMING_SNAKE_CASE )
A_ = None
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
        if isinstance(sources , int ):
            A_ = [sources]
        if isinstance(sinks , int ):
            A_ = [sinks]
if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0:
return
A_ = sources[0]
A_ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_SCREAMING_SNAKE_CASE ) > 1 or len(_SCREAMING_SNAKE_CASE ) > 1:
A_ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A_ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A_ = max_input_flow
A_ = 0
A_ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A_ = max_input_flow
A_ = size - 1
def __A ( self ) -> Any:
if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set a maximum flow algorithm first.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = algorithm(self )
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = flow_network
A_ = flow_network.verticesCount
A_ = flow_network.sourceIndex
A_ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A_ = flow_network.graph
A_ = False
def __A ( self ) -> Dict:
if not self.executed:
self._algorithm()
A_ = True
def __A ( self ) -> Any:
pass
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
super().__init__(_SCREAMING_SNAKE_CASE )
# use this to save your result
A_ = -1
def __A ( self ) -> Tuple:
if not self.executed:
            raise Exception('''You should execute the algorithm before using its result!''' )
return self.maximum_flow
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Any:
super().__init__(_SCREAMING_SNAKE_CASE )
A_ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A_ = [0] * self.verticies_count
A_ = [0] * self.verticies_count
def __A ( self ) -> str:
A_ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A_ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A_ = 0
while i < len(_SCREAMING_SNAKE_CASE ):
A_ = vertices_list[i]
A_ = self.heights[vertex_index]
self.process_vertex(_SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_SCREAMING_SNAKE_CASE ) )
A_ = 0
else:
i += 1
A_ = sum(self.preflow[self.source_index] )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Any:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.relabel(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
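        # push: move min(excess, residual capacity) units of flow from from_index to to_index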
A_ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
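        # relabel: raise this vertex's height to one above its lowest neighbour with residual capacity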
A_ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A_ = self.heights[to_index]
if min_height is not None:
A_ = min_height + 1
if __name__ == "__main__":
__snake_case : int = [0]
__snake_case : Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__snake_case : Optional[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__snake_case : Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__snake_case : Optional[Any] = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 18 | '''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=[1, 384, 24, 24] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = backbone_out_indices
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = num_labels
A_ = backbone_featmap_shape
A_ = scope
A_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def __A ( self ) -> Optional[Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[Any]:
A_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = DPTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
A_ = self.num_labels
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
A_ = self.num_labels
A_ = DPTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ) -> Optional[int]:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Any = False
__lowercase : Tuple = False
__lowercase : List[Any] = False
def __A ( self ) -> Tuple:
A_ = DPTModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self ) -> Union[str, Any]:
pass
def __A ( self ) -> Dict:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> Optional[int]:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = False
A_ = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
A_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
A_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __A ( self ) -> Tuple:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
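        # with the configured initializer ranges zeroed out, weight means must land on exactly 0.0 or 1.0 (e.g. LayerNorm scales)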
for model_class in self.all_model_classes:
A_ = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
A_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
A_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> int:
pass
@slow
def __A ( self ) -> Dict:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
A_ = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = '''add'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Optional[int]:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Any:
A_ = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
A_ = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_SCREAMING_SNAKE_CASE )
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE )
A_ = outputs.predicted_depth
# verify the predicted depth
A_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _UpperCAmelCase ( _UpperCamelCase : List[Any], _UpperCamelCase : str, _UpperCamelCase : List[str] ) -> Any:
A_ = 1.5
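    # over-query (factor 1.5) so enough images survive failed downloads and filtering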
A_ = int(factor * num_class_images )
A_ = ClipClient(
url='''https://knn.laion.ai/knn-service''', indice_name='''laion_400m''', num_images=_UpperCamelCase, aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''', exist_ok=_UpperCamelCase )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
A_ = client.query(text=_UpperCamelCase )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
break
else:
A_ = int(factor * num_images )
A_ = ClipClient(
url='''https://knn.laion.ai/knn-service''', indice_name='''laion_400m''', num_images=_UpperCamelCase, aesthetic_weight=0.1, )
A_ = 0
A_ = 0
A_ = tqdm(desc='''downloading real regularization images''', total=_UpperCamelCase )
with open(F'''{class_data_dir}/caption.txt''', '''w''' ) as fa, open(F'''{class_data_dir}/urls.txt''', '''w''' ) as fa, open(
F'''{class_data_dir}/images.txt''', '''w''' ) as fa:
while total < num_class_images:
A_ = class_images[count]
count += 1
try:
A_ = requests.get(images['''url'''] )
if img.status_code == 2_00:
A_ = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''', '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _UpperCAmelCase ( ) -> Optional[int]:
A_ = argparse.ArgumentParser('''''', add_help=_UpperCamelCase )
parser.add_argument('''--class_prompt''', help='''text prompt to retrieve images''', required=_UpperCamelCase, type=_UpperCamelCase )
parser.add_argument('''--class_data_dir''', help='''path to save images''', required=_UpperCamelCase, type=_UpperCamelCase )
parser.add_argument('''--num_class_images''', help='''number of images to download''', default=2_00, type=_UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
__snake_case : str = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 18 | '''simple docstring'''
import math
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : float ) -> float:
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
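    # Malus's law: transmitted intensity I = I_0 * cos^2(theta)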
return initial_intensity * (math.cos(math.radians(_UpperCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 18 | 1 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__snake_case : List[str] = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def _UpperCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[str]:
A_ = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase, _UpperCamelCase )
__snake_case : Optional[Any] = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Optional[Any]:
A_ = list(s_dict.keys() )
for key in keys:
A_ = key
for k, v in WHISPER_MAPPING.items():
if k in key:
A_ = new_key.replace(_UpperCamelCase, _UpperCamelCase )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(_UpperCamelCase )
return s_dict
def _UpperCAmelCase ( _UpperCamelCase : Any ) -> List[Any]:
A_ ,A_ = emb.weight.shape
A_ = nn.Linear(_UpperCamelCase, _UpperCamelCase, bias=_UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : str ) -> bytes:
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
A_ = os.path.basename(_UpperCamelCase )
A_ = url.split('''/''' )[-2]
A_ = os.path.join(_UpperCamelCase, _UpperCamelCase )
if os.path.exists(_UpperCamelCase ) and not os.path.isfile(_UpperCamelCase ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(_UpperCamelCase ):
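        # reuse the cached file when its SHA256 matches; otherwise warn and re-download below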
A_ = open(_UpperCamelCase, '''rb''' ).read()
if hashlib.shaaaa(_UpperCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(_UpperCamelCase ) as source, open(_UpperCamelCase, '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ), ncols=80, unit='''iB''', unit_scale=_UpperCamelCase, unit_divisor=10_24 ) as loop:
while True:
A_ = source.read(81_92 )
if not buffer:
break
output.write(_UpperCamelCase )
loop.update(len(_UpperCamelCase ) )
A_ = open(_UpperCamelCase, '''rb''' ).read()
if hashlib.shaaaa(_UpperCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Optional[Any] ) -> Any:
if ".pt" not in checkpoint_path:
A_ = _download(_MODELS[checkpoint_path] )
else:
A_ = torch.load(_UpperCamelCase, map_location='''cpu''' )
A_ = original_checkpoint['''dims''']
A_ = original_checkpoint['''model_state_dict''']
A_ = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(_UpperCamelCase )
rename_keys(_UpperCamelCase )
A_ = True
A_ = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
A_ = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''], encoder_ffn_dim=_UpperCamelCase, decoder_ffn_dim=_UpperCamelCase, num_mel_bins=dimensions['''n_mels'''], d_model=dimensions['''n_audio_state'''], max_target_positions=dimensions['''n_text_ctx'''], encoder_layers=dimensions['''n_audio_layer'''], encoder_attention_heads=dimensions['''n_audio_head'''], decoder_layers=dimensions['''n_text_layer'''], decoder_attention_heads=dimensions['''n_text_head'''], max_source_positions=dimensions['''n_audio_ctx'''], )
A_ = WhisperForConditionalGeneration(_UpperCamelCase )
A_ ,A_ = model.model.load_state_dict(_UpperCamelCase, strict=_UpperCamelCase )
if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
A_ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A_ = proj_out_weights
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__snake_case : Union[str, Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 18 | '''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 18 | 1 |
'''simple docstring'''
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _UpperCAmelCase ( _UpperCamelCase : Optional[int] ) -> Tuple:
return 1 / (1 + np.exp(-z ))
def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
return (-y * np.log(_UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def _UpperCAmelCase ( _UpperCamelCase : Dict, _UpperCamelCase : Union[str, Any], _UpperCamelCase : List[str] ) -> Tuple:
A_ = np.dot(_UpperCamelCase, _UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(_UpperCamelCase ) ) )
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : Any, _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple=7_00_00 ) -> List[Any]:
A_ = np.zeros(x.shape[1] )
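    # plain batch gradient descent on the logistic (cross-entropy) loss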
for iterations in range(_UpperCamelCase ):
A_ = np.dot(_UpperCamelCase, _UpperCamelCase )
A_ = sigmoid_function(_UpperCamelCase )
A_ = np.dot(x.T, h - y ) / y.size
A_ = theta - alpha * gradient # updating the weights
A_ = np.dot(_UpperCamelCase, _UpperCamelCase )
A_ = sigmoid_function(_UpperCamelCase )
A_ = cost_function(_UpperCamelCase, _UpperCamelCase )
if iterations % 1_00 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
__snake_case : Optional[Any] = datasets.load_iris()
__snake_case : List[Any] = iris.data[:, :2]
__snake_case : List[str] = (iris.target != 0) * 1
__snake_case : Tuple = 0.1
__snake_case : Any = logistic_reg(alpha, x, y, max_iterations=70_000)
print('theta: ', theta) # printing the theta i.e our weights vector
def _UpperCAmelCase ( _UpperCamelCase : List[Any] ) -> List[str]:
return sigmoid_function(
np.dot(_UpperCamelCase, _UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((__snake_case) , (__snake_case)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__snake_case) , (__snake_case)) : Dict = (x[:, 1].min(), x[:, 1].max())
((__snake_case) , (__snake_case)) : List[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__snake_case : int = np.c_[xxa.ravel(), xxa.ravel()]
__snake_case : Any = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 18 | '''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> List[str]:
A_ = torch.load(_UpperCamelCase, map_location='''cpu''' )
if "model" in sd.keys():
A_ = torch.load(_UpperCamelCase, map_location='''cpu''' )['''model''']
# pop unnecessary weights
A_ = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_UpperCamelCase )
A_ = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A_ = sd.pop(_UpperCamelCase )
A_ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
A_ = sd[key]
# We split QKV in separate Q,K,V
A_ = key.replace('''.qkv_proj.''', '''.q_proj.''' )
A_ = key.replace('''.qkv_proj.''', '''.k_proj.''' )
A_ = key.replace('''.qkv_proj.''', '''.v_proj.''' )
A_ = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores its QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A_ ,A_ ,A_ = torch.split(_UpperCamelCase, depth // 3, dim=0 )
A_ = q
A_ = k
A_ = v
del sd[key]
return sd
@torch.no_grad()
def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : Optional[Any], _UpperCamelCase : List[str]=None ) -> Dict:
A_ = load_checkpoint(_UpperCamelCase )
if config is not None:
A_ = OPTConfig.from_pretrained(_UpperCamelCase )
else:
A_ = OPTConfig()
A_ = OPTModel(_UpperCamelCase ).half().eval()
model.load_state_dict(_UpperCamelCase )
# Check results
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
__snake_case : Optional[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 18 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__snake_case : str = logging.get_logger(__name__)
def _UpperCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Optional[int]:
A_ = R'''\w+[.]\d+'''
A_ = re.findall(_UpperCamelCase, _UpperCamelCase )
for pat in pats:
A_ = key.replace(_UpperCamelCase, '''_'''.join(pat.split('''.''' ) ) )
return key
def _UpperCAmelCase ( _UpperCamelCase : Dict, _UpperCamelCase : List[Any], _UpperCamelCase : Optional[int] ) -> Any:
A_ = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
A_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : Dict, _UpperCamelCase : Union[str, Any]=42 ) -> Union[str, Any]:
# Step 1: Convert pytorch tensor to numpy
A_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ = flax_model.init_weights(PRNGKey(_UpperCamelCase ) )
A_ = flatten_dict(_UpperCamelCase )
A_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ = rename_key(_UpperCamelCase )
A_ = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
A_ ,A_ = rename_key_and_reshape_tensor(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
A_ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
| 18 | '''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__snake_case : Optional[Any] = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
__snake_case : Tuple = {'allegro/herbert-base-cased': 514}
__snake_case : List[str] = {}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Any = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = HerbertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="</s>" , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
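        # single sequence: <s> X </s> ; pair of sequences: <s> A </s> B </s>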
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
A_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 18 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : str = StableUnCLIPPipeline
__lowercase : int = TEXT_TO_IMAGE_PARAMS
__lowercase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowercase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__lowercase : Dict = False
def __A ( self ) -> Any:
A_ = 32
A_ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
A_ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_SCREAMING_SNAKE_CASE , projection_dim=_SCREAMING_SNAKE_CASE , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A_ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_SCREAMING_SNAKE_CASE , num_layers=1 , )
torch.manual_seed(0 )
A_ = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
A_ = StableUnCLIPImageNormalizer(embedding_dim=_SCREAMING_SNAKE_CASE )
A_ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
A_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
A_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_SCREAMING_SNAKE_CASE , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=_SCREAMING_SNAKE_CASE , use_linear_projection=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
A_ = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
A_ = AutoencoderKL()
A_ = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Tuple:
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self ) -> int:
A_ = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[str]:
A_ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> List[str]:
A_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
A_ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
        A_ = pipe('''anime turtle''' , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' )
A_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
A_ = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
A_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 18 | '''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
A_ = subparsers.add_parser('''env''' )
else:
A_ = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''', default=_UpperCamelCase, help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=_UpperCamelCase )
return parser
def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Dict:
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = is_xpu_available()
A_ = is_npu_available()
A_ = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCamelCase ):
A_ = load_config_from_file(args.config_file ).to_dict()
A_ = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(_UpperCamelCase ),
'''PyTorch NPU available''': str(_UpperCamelCase ),
'''System RAM''': F'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
A_ = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
A_ = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_UpperCamelCase, _UpperCamelCase )
else F'''\t{accelerate_config}'''
)
print(_UpperCamelCase )
A_ = accelerate_config
return info
def _UpperCAmelCase ( ) -> int:
A_ = env_command_parser()
A_ = parser.parse_args()
env_command(_UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 18 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _UpperCAmelCase ( _UpperCamelCase : int ) -> str:
random.seed(_UpperCamelCase )
np.random.seed(_UpperCamelCase )
torch.manual_seed(_UpperCamelCase )
torch.cuda.manual_seed_all(_UpperCamelCase )
# ^^ safe to call this function even if cuda is not available
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.9_999 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = 2 / 3 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Module ):
A_ = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE , )
A_ = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A_ = True
if kwargs.get('''max_value''' , _SCREAMING_SNAKE_CASE ) is not None:
A_ = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
A_ = kwargs['''max_value''']
if kwargs.get('''min_value''' , _SCREAMING_SNAKE_CASE ) is not None:
A_ = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
A_ = kwargs['''min_value''']
A_ = list(_SCREAMING_SNAKE_CASE )
A_ = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , _SCREAMING_SNAKE_CASE ) is not None:
A_ = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
self.to(device=kwargs['''device'''] )
A_ = None
A_ = decay
A_ = min_decay
A_ = update_after_step
A_ = use_ema_warmup
A_ = inv_gamma
A_ = power
A_ = 0
A_ = None # set in `step()`
A_ = model_cls
A_ = model_config
@classmethod
def __A ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> "EMAModel":
A_ ,A_ = model_cls.load_config(_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE )
A_ = model_cls.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ = cls(model.parameters() , model_cls=_SCREAMING_SNAKE_CASE , model_config=model.config )
ema_model.load_state_dict(_SCREAMING_SNAKE_CASE )
return ema_model
def __A ( self , _SCREAMING_SNAKE_CASE ) -> int:
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
A_ = self.model_cls.from_config(self.model_config )
A_ = self.state_dict()
state_dict.pop('''shadow_params''' , _SCREAMING_SNAKE_CASE )
model.register_to_config(**_SCREAMING_SNAKE_CASE )
self.copy_to(model.parameters() )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> float:
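        # decay ramps up with step: 1 - (1 + step / inv_gamma) ** -power during EMA warmup, else (1 + step) / (10 + step), clamped into [min_decay, decay]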
A_ = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A_ = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
A_ = (1 + step) / (10 + step)
A_ = min(_SCREAMING_SNAKE_CASE , self.decay )
# make sure decay is not smaller than min_decay
A_ = max(_SCREAMING_SNAKE_CASE , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Module ):
A_ = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE , )
A_ = parameters.parameters()
A_ = list(_SCREAMING_SNAKE_CASE )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A_ = self.get_decay(self.optimization_step )
A_ = decay
A_ = 1 - decay
A_ = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _SCREAMING_SNAKE_CASE ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A_ = deepspeed.zero.GatheredParameters(_SCREAMING_SNAKE_CASE , modifier_rank=_SCREAMING_SNAKE_CASE )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
A_ = list(_SCREAMING_SNAKE_CASE )
for s_param, param in zip(self.shadow_params , _SCREAMING_SNAKE_CASE ):
param.data.copy_(s_param.to(param.device ).data )
def __A ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> None:
A_ = [
p.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) if p.is_floating_point() else p.to(device=_SCREAMING_SNAKE_CASE )
for p in self.shadow_params
]
def __A ( self ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
A_ = [param.detach().cpu().clone() for param in parameters]
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , _SCREAMING_SNAKE_CASE ):
param.data.copy_(c_param.data )
# Better memory-wise.
A_ = None
def __A ( self , _SCREAMING_SNAKE_CASE ) -> None:
A_ = copy.deepcopy(_SCREAMING_SNAKE_CASE )
A_ = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
A_ = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid min_decay''' )
A_ = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid optimization_step''' )
A_ = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid update_after_step''' )
A_ = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid use_ema_warmup''' )
A_ = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
A_ = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
A_ = state_dict.get('''shadow_params''' , _SCREAMING_SNAKE_CASE )
if shadow_params is not None:
A_ = shadow_params
if not isinstance(self.shadow_params , _SCREAMING_SNAKE_CASE ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
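# --- Illustrative sketch (an addition, not part of the row above): the EMA decay
# schedule implemented by `get_decay`, reproduced as a standalone function so it
# can be sanity-checked in isolation. The default values for inv_gamma, power and
# update_after_step are assumptions mirroring diffusers' defaults, not values
# read from this snippet.
def ema_decay_schedule(step, decay=0.9999, min_decay=0.0, update_after_step=0,
                       use_ema_warmup=False, inv_gamma=1.0, power=2 / 3):
    step = max(0, step - update_after_step - 1)
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur = 1 - (1 + step / inv_gamma) ** -power
    else:
        cur = (1 + step) / (10 + step)
    # decay is clamped into [min_decay, decay], exactly as above
    return max(min(cur, decay), min_decay)

for s in (1, 10, 100, 10_000):
    print(s, round(ema_decay_schedule(s, use_ema_warmup=True), 6))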
| 18 | '''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple:
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = mask_ratio
A_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __A ( self ) -> Union[str, Any]:
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
A_ = ViTMAEModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = (self.image_size // self.patch_size) ** 2
A_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ = 1
A_ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(_SCREAMING_SNAKE_CASE )
A_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self ) -> int:
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowercase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowercase : List[Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__lowercase : Union[str, Any] = False
__lowercase : List[Any] = False
__lowercase : List[str] = False
__lowercase : List[str] = False
def __A ( self ) -> Any:
A_ = ViTMAEModelTester(self )
A_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __A ( self ) -> int:
pass
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self ) -> int:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Union[str, Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
# make masks reproducible
np.random.seed(2 )
A_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> str:
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
A_ = outputs[0].cpu().numpy()
A_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
A_ = after_outputs[0].cpu().numpy()
A_ = 0
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def __A ( self ) -> List[str]:
pass
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def __A ( self ) -> Dict:
pass
    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __A ( self ) -> str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self ) -> Union[str, Any]:
pass
@slow
def __A ( self ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( ) -> Dict:
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> List[str]:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(_SCREAMING_SNAKE_CASE )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ = ViTMAEConfig()
A_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
A_ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
A_ = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
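# --- Illustrative check (an addition, not part of the row above): the masked
# sequence length used by the ViTMAE tester follows directly from image_size,
# patch_size and mask_ratio; the values below are the tester's defaults.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
print(num_patches, seq_length)  # 225 91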
| 18 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : List[Any] = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = 'audio-spectrogram-transformer'
def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=128 , **_SCREAMING_SNAKE_CASE , ) -> Tuple:
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = layer_norm_eps
A_ = patch_size
A_ = qkv_bias
A_ = frequency_stride
A_ = time_stride
A_ = max_length
A_ = num_mel_bins
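# --- Usage sketch (an addition, not part of the row above): instantiating the
# Audio Spectrogram Transformer config defined here. Assumes the class is
# exported by transformers as ASTConfig.
from transformers import ASTConfig

config = ASTConfig(hidden_size=768, num_mel_bins=128, max_length=1024)
print(config.model_type)  # audio-spectrogram-transformer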
| 18 | '''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : int = logging.get_logger(__name__)
__snake_case : str = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Optional[Any] = 'xlm-prophetnet'
__lowercase : Optional[int] = ['past_key_values']
__lowercase : int = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 3_0522 , _SCREAMING_SNAKE_CASE = 1024 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 4096 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 2 , **_SCREAMING_SNAKE_CASE , ) -> int:
A_ = vocab_size
A_ = hidden_size
A_ = encoder_ffn_dim
A_ = num_encoder_layers
A_ = num_encoder_attention_heads
A_ = decoder_ffn_dim
A_ = num_decoder_layers
A_ = num_decoder_attention_heads
A_ = max_position_embeddings
A_ = init_std # Normal(0, this parameter)
A_ = activation_function
# parameters for xlmprophetnet
A_ = ngram
A_ = num_buckets
A_ = relative_max_distance
A_ = disable_ngram_loss
A_ = eps
# 3 Types of Dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = dropout
A_ = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , add_cross_attention=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@property
def __A ( self ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
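# --- Usage sketch (an addition, not part of the row above): the read-only
# num_hidden_layers property defined here sums encoder and decoder layers, and
# its setter deliberately raises. Assumes the public transformers class
# XLMProphetNetConfig.
from transformers import XLMProphetNetConfig

cfg = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
print(cfg.num_hidden_layers)  # 24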
| 18 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : int=(), _UpperCamelCase : Optional[Any]=None, _UpperCamelCase : Tuple="no", _UpperCamelCase : Any="29500" ) -> Any:
A_ = False
A_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
A_ = True
elif "IPython" in sys.modules:
A_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
A_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''', _UpperCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
A_ = 8
A_ = PrepareForLaunch(_UpperCamelCase, distributed_type='''TPU''' )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(_UpperCamelCase, args=_UpperCamelCase, nprocs=_UpperCamelCase, start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*_UpperCamelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=_UpperCamelCase, master_addr='''127.0.0.1''', master_port=_UpperCamelCase, mixed_precision=_UpperCamelCase ):
A_ = PrepareForLaunch(_UpperCamelCase, distributed_type='''MULTI_GPU''' )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(_UpperCamelCase, args=_UpperCamelCase, nprocs=_UpperCamelCase, start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
A_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*_UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : List[str]=(), _UpperCamelCase : Union[str, Any]=2 ) -> Union[str, Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=_UpperCamelCase, master_addr='''127.0.0.1''', master_port='''29500''', accelerate_mixed_precision='''no''', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='''yes''', ):
A_ = PrepareForLaunch(_UpperCamelCase, debug=_UpperCamelCase )
start_processes(_UpperCamelCase, args=_UpperCamelCase, nprocs=_UpperCamelCase, start_method='''fork''' )
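# --- Usage sketch (an addition, not part of the row above): in the upstream
# accelerate library the first function corresponds to notebook_launcher. The
# training function below is a hypothetical placeholder.
from accelerate import notebook_launcher

def training_loop(mixed_precision="no"):
    from accelerate import Accelerator  # must be created inside the launched fn
    accelerator = Accelerator(mixed_precision=mixed_precision)
    accelerator.print("hello from each process")

# Fork-based, intended to be run from a notebook cell:
# notebook_launcher(training_loop, args=("no",), num_processes=2)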
| 18 | '''simple docstring'''
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : list[float] ) -> float:
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
A_ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_UpperCamelCase ) )
return round(_UpperCamelCase, ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
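# --- Worked example (an addition, not part of the row above): the helper
# discounts each cash flow by (1 + rate) ** period and rounds to 2 digits.
cash_flows = [100.0, 100.0, 100.0]
rate = 0.1
npv = round(sum(cf / (1 + rate) ** i for i, cf in enumerate(cash_flows)), 2)
print(npv)  # 273.55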
| 18 | 1 |
'''simple docstring'''
__snake_case : Dict = 8.3_144_598
def _UpperCAmelCase ( _UpperCamelCase : float, _UpperCamelCase : float ) -> float:
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
__snake_case : List[Any] = 300
__snake_case : Union[str, Any] = 28
__snake_case : int = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 18 | '''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( _UpperCamelCase : int | str ) -> bool:
A_ = str(_UpperCamelCase )
return n == n[::-1]
def _UpperCAmelCase ( _UpperCamelCase : int = 1_00_00_00 ) -> Any:
A_ = 0
for i in range(1, _UpperCamelCase ):
if is_palindrome(_UpperCamelCase ) and is_palindrome(bin(_UpperCamelCase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
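# --- Worked example (an addition, not part of the row above): 585 is the
# classic double-base palindrome, reading the same in base 10 and in base 2.
n = 585
print(str(n) == str(n)[::-1], bin(n)[2:] == bin(n)[2:][::-1])  # True True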
| 18 | 1 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Optional[int] = logging.get_logger(__name__)
def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : List[str], _UpperCamelCase : Tuple ) -> Tuple:
A_ = os.path.abspath(_UpperCamelCase )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
A_ = tf.train.list_variables(_UpperCamelCase )
A_ = []
A_ = []
A_ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
A_ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
A_ = name[1:]
# figure out how many levels deep the name is
A_ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(_UpperCamelCase )
# read data
A_ = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase )
names.append('''/'''.join(_UpperCamelCase ) )
arrays.append(_UpperCamelCase )
logger.info(F'''Read a total of {len(_UpperCamelCase ):,} layers''' )
# Sanity check
if len(set(_UpperCamelCase ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(_UpperCamelCase ) )})''' )
A_ = list(set(_UpperCamelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(_UpperCamelCase, _UpperCamelCase ):
A_ = full_name.split('''/''' )
A_ = model
A_ = []
for i, m_name in enumerate(_UpperCamelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
A_ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
A_ = getattr(_UpperCamelCase, '''embeddings''' )
A_ = getattr(_UpperCamelCase, '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
A_ = getattr(_UpperCamelCase, '''encoder''' )
A_ = getattr(_UpperCamelCase, '''layer''' )
A_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
A_ = getattr(_UpperCamelCase, '''pooler''' )
A_ = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
A_ = getattr(_UpperCamelCase, '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
A_ = getattr(_UpperCamelCase, '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
A_ = getattr(_UpperCamelCase, '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
A_ = getattr(_UpperCamelCase, '''token_type_embeddings''' )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append('''weight''' )
A_ = getattr(_UpperCamelCase, '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
A_ = getattr(_UpperCamelCase, '''attention''' )
A_ = getattr(_UpperCamelCase, '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
A_ = getattr(_UpperCamelCase, '''attention''' )
A_ = getattr(_UpperCamelCase, '''output''' )
A_ = getattr(_UpperCamelCase, '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
A_ = getattr(_UpperCamelCase, '''attention''' )
A_ = getattr(_UpperCamelCase, '''output''' )
A_ = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
A_ = getattr(_UpperCamelCase, '''output''' )
A_ = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
A_ = getattr(_UpperCamelCase, '''output''' )
A_ = getattr(_UpperCamelCase, '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
A_ = getattr(_UpperCamelCase, '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
A_ = getattr(_UpperCamelCase, '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
A_ = getattr(_UpperCamelCase, '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
A_ = getattr(_UpperCamelCase, '''intermediate''' )
A_ = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
A_ = getattr(_UpperCamelCase, '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
A_ = getattr(_UpperCamelCase, '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
A_ = getattr(_UpperCamelCase, '''weight''' )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
A_ = '''.'''.join(_UpperCamelCase )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', _UpperCamelCase ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''', _UpperCamelCase ):
A_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
A_ = array.transpose()
if pointer.shape == array.shape:
A_ = torch.from_numpy(_UpperCamelCase )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : Tuple, _UpperCamelCase : Optional[int] ) -> int:
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
A_ = BertConfig.from_json_file(_UpperCamelCase )
A_ = BertModel(_UpperCamelCase )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict(), _UpperCamelCase )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
__snake_case : Any = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
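# --- Illustrative sketch (an addition, not part of the row above): the depth
# bookkeeping used by the converter counts leading "layer_with_weights" path
# components after dropping the "model" prefix. The checkpoint variable path
# below is a hypothetical example.
full_name = "model/layer_with_weights-4/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
name = full_name.split("/")[1:]  # ignore the initial "model"
depth = 0
for part in name:
    if part.startswith("layer_with_weights"):
        depth += 1
    else:
        break
print(depth)  # 1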
| 18 | '''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Tuple, _UpperCamelCase : List[str] ) -> int:
A_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
A_ = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A_ = F'''{src_lang}-{tgt_lang}'''
A_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(_UpperCamelCase, exist_ok=_UpperCamelCase )
A_ = os.path.join(_UpperCamelCase, '''README.md''' )
print(F'''Generating {path}''' )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(_UpperCamelCase )
# make sure we are under the root of the project
__snake_case : Any = Path(__file__).resolve().parent.parent.parent
__snake_case : Tuple = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__snake_case , __snake_case , __snake_case : Any = model_name.split('-')
__snake_case : int = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | 1 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__snake_case : Union[str, Any] = 'src/transformers'
__snake_case : Dict = 'docs/source/en/tasks'
def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Tuple, _UpperCamelCase : int ) -> List[Any]:
with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
A_ = f.readlines()
# Find the start prompt.
A_ = 0
while not lines[start_index].startswith(_UpperCamelCase ):
start_index += 1
start_index += 1
A_ = start_index
while not lines[end_index].startswith(_UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__snake_case : Tuple = direct_transformers_import(TRANSFORMERS_PATH)
__snake_case : Optional[int] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__snake_case : str = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def _UpperCAmelCase ( _UpperCamelCase : Optional[int] ) -> str:
A_ = TASK_GUIDE_TO_MODELS[task_guide]
A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_UpperCamelCase, set() )
A_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Tuple=False ) -> Optional[Any]:
A_ ,A_ ,A_ ,A_ = _find_text_in_file(
filename=os.path.join(_UpperCamelCase, _UpperCamelCase ), start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''', end_prompt='''<!--End of the generated tip-->''', )
A_ = get_model_list_for_task(_UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_UpperCamelCase, _UpperCamelCase ), '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
''' to fix this.''' )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__snake_case : Dict = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
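# --- Illustrative sketch (an addition, not part of the row above): the
# prompt-delimited extraction done by the first helper, shown on an in-memory
# list of lines instead of a file (both prompt strings are hypothetical).
lines = ["intro\n", "<!--start-->\n", "model A\n", "model B\n", "<!--end-->\n"]
start = next(i for i, l in enumerate(lines) if l.startswith("<!--start-->")) + 1
end = next(i for i, l in enumerate(lines) if l.startswith("<!--end-->"))
print("".join(lines[start:end]), end="")  # model A\nmodel B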
| 18 | '''simple docstring'''
from collections import defaultdict
def _UpperCAmelCase ( _UpperCamelCase : int ) -> int:
A_ = 1
A_ = True
for v in tree[start]:
if v not in visited:
ret += dfs(_UpperCamelCase )
if ret % 2 == 0:
cuts.append(_UpperCamelCase )
return ret
def _UpperCAmelCase ( ) -> Optional[Any]:
dfs(1 )
if __name__ == "__main__":
__snake_case , __snake_case : Union[str, Any] = 10, 9
__snake_case : int = defaultdict(list)
__snake_case : dict[int, bool] = {}
__snake_case : list[int] = []
__snake_case : Union[str, Any] = 0
__snake_case : int = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
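# --- Illustrative sketch (an addition, not part of the row above): an edge
# (parent, child) of the rooted tree can be cut exactly when the child's
# subtree has an even number of nodes; the DFS above counts these subtrees,
# and the "- 1" discards the root's own even-sized (10-node) subtree.
from collections import defaultdict

edge_list = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
graph = defaultdict(list)
for u, v in edge_list:
    graph[u].append(v)
    graph[v].append(u)

removable = []

def subtree(node, parent):
    size = 1
    for child in graph[node]:
        if child != parent:
            child_size = subtree(child, node)
            if child_size % 2 == 0:
                removable.append((node, child))
            size += child_size
    return size

subtree(1, 0)
print(len(removable))  # 2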
| 18 | 1 |