Dataset schema (each row pairs a code sample with a style-context sample):

    code                      string  82 to 54.1k chars
    code_codestyle            int64   0 to 699
    style_context             string  111 to 35.6k chars
    style_context_codestyle   int64   0 to 699
    label                     int64   0 or 1
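A dump like this is easiest to inspect programmatically. The sketch below assumes the `datasets` library and a hypothetical repository id, since this excerpt does not name the dataset:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a placeholder id -- substitute the real repository.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # code cell (82 to 54.1k characters)
print(row["code_codestyle"])           # style id of the code cell (0 to 699)
print(row["style_context"][:200])      # paired sample (111 to 35.6k characters)
print(row["style_context_codestyle"])  # style id of the paired sample
print(row["label"])                    # 0 or 1
```

Row 1

code: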
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowercase : Union[str, Any] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :str ): __UpperCAmelCase = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) __UpperCAmelCase = MaskFormerConfig(backbone_config=snake_case_ ) __UpperCAmelCase = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok __UpperCAmelCase = 847 __UpperCAmelCase = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok __UpperCAmelCase = 150 __UpperCAmelCase = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok __UpperCAmelCase = 171 __UpperCAmelCase = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO __UpperCAmelCase = 133 __UpperCAmelCase = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok __UpperCAmelCase = 19 __UpperCAmelCase = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok __UpperCAmelCase = 65 __UpperCAmelCase = '''mapillary-vistas-id2label.json''' __UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) ) __UpperCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()} return config def lowercase__ ( snake_case_ :str ): __UpperCAmelCase = [] # stem # fmt: off rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', 
F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') ) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') ) # Transformer decoder for idx in 
range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') ) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') ) 
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def lowercase__ ( snake_case_ :List[str] , snake_case_ :int , snake_case_ :Union[str, Any] ): __UpperCAmelCase = dct.pop(snake_case_ ) __UpperCAmelCase = val def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any ): __UpperCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __UpperCAmelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __UpperCAmelCase = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) __UpperCAmelCase = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase = in_proj_weight[:dim, :] __UpperCAmelCase = in_proj_bias[: dim] __UpperCAmelCase = in_proj_weight[ dim : dim * 2, : ] __UpperCAmelCase = in_proj_bias[ dim : dim * 2 ] __UpperCAmelCase = in_proj_weight[ -dim :, : ] __UpperCAmelCase = in_proj_bias[-dim :] # fmt: on def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :str ): # fmt: off __UpperCAmelCase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase = in_proj_weight[: hidden_size, :] __UpperCAmelCase = in_proj_bias[:config.hidden_size] __UpperCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __UpperCAmelCase = in_proj_weight[-hidden_size :, :] __UpperCAmelCase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase = in_proj_weight[: hidden_size, :] __UpperCAmelCase = in_proj_bias[:config.hidden_size] __UpperCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __UpperCAmelCase = in_proj_weight[-hidden_size :, :] __UpperCAmelCase = in_proj_bias[-hidden_size :] # fmt: on def lowercase__ ( ): __UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __UpperCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowercase__ ( snake_case_ :str , snake_case_ :str , snake_case_ :str , 
snake_case_ :bool = False ): __UpperCAmelCase = get_maskformer_config(snake_case_ ) # load original state_dict with open(snake_case_ , '''rb''' ) as f: __UpperCAmelCase = pickle.load(snake_case_ ) __UpperCAmelCase = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __UpperCAmelCase = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_swin_q_k_v(snake_case_ , config.backbone_config ) read_in_decoder_q_k_v(snake_case_ , snake_case_ ) # update to torch tensors for key, value in state_dict.items(): __UpperCAmelCase = torch.from_numpy(snake_case_ ) # load 🤗 model __UpperCAmelCase = MaskFormerForInstanceSegmentation(snake_case_ ) model.eval() for name, param in model.named_parameters(): print(snake_case_ , param.shape ) __UpperCAmelCase , __UpperCAmelCase = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(snake_case_ ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results __UpperCAmelCase = prepare_img() if "vistas" in model_name: __UpperCAmelCase = 65 elif "cityscapes" in model_name: __UpperCAmelCase = 65_535 else: __UpperCAmelCase = 255 __UpperCAmelCase = True if '''ade''' in model_name else False __UpperCAmelCase = MaskFormerImageProcessor(ignore_index=snake_case_ , reduce_labels=snake_case_ ) __UpperCAmelCase = image_processor(snake_case_ , return_tensors='''pt''' ) __UpperCAmelCase = model(**snake_case_ ) print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __UpperCAmelCase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if push_to_hub: print('''Pushing model and image processor to the hub...''' ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowercase : int = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
code_codestyle: 49

style_context:
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def a ( self : List[str] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self : Dict ): __UpperCAmelCase = ort.SessionOptions() __UpperCAmelCase = False return options def a ( self : Any ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a ( self : Optional[int] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-3
style_context_codestyle: 49
label: 1
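The two cells above carry the same codestyle id (49) and the row's label is 1, which suggests the label marks style agreement between `code` and `style_context`. That reading is an assumption, not stated in this dump; a minimal sketch for checking it, reusing the placeholder id from the loading example:

```python
from collections import Counter

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id

# Tally how often matching codestyle ids coincide with label == 1.
tally = Counter(
    (row["code_codestyle"] == row["style_context_codestyle"], row["label"]) for row in ds
)
print(tally)  # if the reading holds, (True, 1) and (False, 0) should dominate
```

Row 2

code: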
"""simple docstring""" def lowercase__ ( snake_case_ :int ): if not isinstance(snake_case_ , snake_case_ ): raise ValueError('''multiplicative_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''multiplicative_persistence() does not accept negative values''' ) __UpperCAmelCase = 0 __UpperCAmelCase = str(snake_case_ ) while len(snake_case_ ) != 1: __UpperCAmelCase = [int(snake_case_ ) for i in num_string] __UpperCAmelCase = 1 for i in range(0 , len(snake_case_ ) ): total *= numbers[i] __UpperCAmelCase = str(snake_case_ ) steps += 1 return steps def lowercase__ ( snake_case_ :int ): if not isinstance(snake_case_ , snake_case_ ): raise ValueError('''additive_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''additive_persistence() does not accept negative values''' ) __UpperCAmelCase = 0 __UpperCAmelCase = str(snake_case_ ) while len(snake_case_ ) != 1: __UpperCAmelCase = [int(snake_case_ ) for i in num_string] __UpperCAmelCase = 0 for i in range(0 , len(snake_case_ ) ): total += numbers[i] __UpperCAmelCase = str(snake_case_ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 49

style_context:
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__ ( snake_case_ :Dict , snake_case_ :int ): assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} __UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} __UpperCAmelCase = features.copy() __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} 
) if features is not None else None ) __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ): if issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = jsonl_path elif issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = [jsonl_path] __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ): assert isinstance(snake_case_ , snake_case_ ) for split in splits: __UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read() 
_check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ): if split: __UpperCAmelCase = {split: jsonl_path} else: __UpperCAmelCase = '''train''' __UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path} __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__ ( snake_case_ :Optional[int] ): return json.load(snake_case_ ) def lowercase__ ( snake_case_ :Any ): return [json.loads(snake_case_ ) for line in buffer] class _UpperCAmelCase : @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, 
'''data'''), ] , ) def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 def a ( self : int , _lowercase : Any ): with pytest.raises(_lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ): __UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' __UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() assert exported_content == original_content
style_context_codestyle: 49
label: 1
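Every sample in this dump uses the same placeholder identifiers (`__UpperCAmelCase`, `_lowercase`, `snake_case_`): variable names are normalized away so that only the formatting style remains. A small sketch for measuring how heavily a cell is obfuscated, again under the placeholder dataset id:

```python
import re

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id

# Count the normalized placeholder identifiers in the first code cell.
PLACEHOLDERS = re.compile(r"__UpperCAmelCase|_lowercase|snake_case_")
print(len(PLACEHOLDERS.findall(ds[0]["code"])))
```

Row 3

code: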
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def lowercase__ ( snake_case_ :Union[str, Any] ): return 1 / (1 + np.exp(-z )) def lowercase__ ( snake_case_ :int , snake_case_ :Dict ): return (-y * np.log(snake_case_ ) - (1 - y) * np.log(1 - h )).mean() def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Tuple , snake_case_ :int ): __UpperCAmelCase = np.dot(snake_case_ , snake_case_ ) return np.sum(y * scores - np.log(1 + np.exp(snake_case_ ) ) ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :Tuple , snake_case_ :Any , snake_case_ :Dict=70_000 ): __UpperCAmelCase = np.zeros(x.shape[1] ) for iterations in range(snake_case_ ): __UpperCAmelCase = np.dot(snake_case_ , snake_case_ ) __UpperCAmelCase = sigmoid_function(snake_case_ ) __UpperCAmelCase = np.dot(x.T , h - y ) / y.size __UpperCAmelCase = theta - alpha * gradient # updating the weights __UpperCAmelCase = np.dot(snake_case_ , snake_case_ ) __UpperCAmelCase = sigmoid_function(snake_case_ ) __UpperCAmelCase = cost_function(snake_case_ , snake_case_ ) if iterations % 100 == 0: print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": _lowercase : Optional[Any] = datasets.load_iris() _lowercase : Optional[int] = iris.data[:, :2] _lowercase : List[str] = (iris.target != 0) * 1 _lowercase : str = 0.1 _lowercase : Union[str, Any] = logistic_reg(alpha, x, y, max_iterations=7_00_00) print('theta: ', theta) # printing the theta i.e our weights vector def lowercase__ ( snake_case_ :int ): return sigmoid_function( np.dot(snake_case_ , snake_case_ ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1') ((_lowercase) ,(_lowercase)) : List[Any] = (x[:, 0].min(), x[:, 0].max()) ((_lowercase) ,(_lowercase)) : Optional[int] = (x[:, 1].min(), x[:, 1].max()) ((_lowercase) ,(_lowercase)) : str = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) _lowercase : Optional[Any] = np.c_[xxa.ravel(), xxa.ravel()] _lowercase : Optional[int] = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black') plt.legend() plt.show()
code_codestyle: 49

style_context:
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase = TextIteratorStreamer(_lowercase ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowercase , _lowercase ) def a ( self : str ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Tuple ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase ) model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase = cs.out[:-1] # Remove the final "\n" __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : Tuple ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowercase ): __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text
style_context_codestyle: 49
label: 1
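For experiments, the rows can be split reproducibly; `train_test_split` is part of the `datasets` API, while the dataset id remains a placeholder:

```python
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id

# Hold out 10% of rows with a fixed seed for evaluation.
splits = ds.train_test_split(test_size=0.1, seed=0)
train_ds, eval_ds = splits["train"], splits["test"]
print(len(train_ds), len(eval_ds))
```

Row 4

code: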
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : int = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'SEW_PRETRAINED_MODEL_ARCHIVE_LIST', 'SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys _lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 49

style_context:
"""simple docstring""" def lowercase__ ( snake_case_ :float , snake_case_ :float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 49
label: 1

Row 5

code:
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowercase : Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = ["pixel_values"] def __init__( self : Optional[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _lowercase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_lowercase : List[Any] , ): super().__init__(**_lowercase ) __UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_24} __UpperCAmelCase = get_size_dict(_lowercase , default_to_square=_lowercase ) __UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __UpperCAmelCase = get_size_dict(_lowercase , param_name='''crop_size''' ) __UpperCAmelCase = do_resize __UpperCAmelCase = size __UpperCAmelCase = resample __UpperCAmelCase = do_center_crop __UpperCAmelCase = crop_size __UpperCAmelCase = do_rescale __UpperCAmelCase = rescale_factor __UpperCAmelCase = do_normalize __UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a ( self : Dict , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ): __UpperCAmelCase = get_size_dict(_lowercase , default_to_square=_lowercase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: __UpperCAmelCase = int((2_56 / 2_24) * size['''shortest_edge'''] ) __UpperCAmelCase = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase ) __UpperCAmelCase = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowercase , size=(size_dict['''height'''], size_dict['''width''']) , resample=_lowercase , data_format=_lowercase , **_lowercase ) def a ( self : Union[str, Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Dict , ): __UpperCAmelCase = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowercase , size=(size['''height'''], size['''width''']) , data_format=_lowercase , **_lowercase ) def a ( self : str , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ): return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def a ( self : Union[str, Any] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Dict , ): return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Dict[str, int]] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, Iterable[float]]] = None , _lowercase : Optional[Union[float, Iterable[float]]] = None , _lowercase : Optional[TensorType] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Optional[Any] , ): __UpperCAmelCase = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase = resample if resample is not None else self.resample __UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase = image_std if image_std is not None else self.image_std __UpperCAmelCase = size if size is not None else self.size __UpperCAmelCase = get_size_dict(_lowercase , default_to_square=_lowercase ) __UpperCAmelCase = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase = get_size_dict(_lowercase , param_name='''crop_size''' ) __UpperCAmelCase = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__UpperCAmelCase = [to_numpy_array(_lowercase ) for image in images] if do_resize: __UpperCAmelCase = [self.resize(_lowercase , _lowercase , _lowercase ) for image in images] if do_center_crop: __UpperCAmelCase = [self.center_crop(_lowercase , _lowercase ) for image in images] if do_rescale: __UpperCAmelCase = [self.rescale(_lowercase , _lowercase ) for image in images] if do_normalize: __UpperCAmelCase = [self.normalize(_lowercase , _lowercase , _lowercase ) for image in images] __UpperCAmelCase = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] __UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
code_codestyle: 49

style_context:
"""simple docstring""" def lowercase__ ( snake_case_ :dict ): __UpperCAmelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __UpperCAmelCase = set() return any( node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for node in graph ) def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ): visited.add(snake_case_ ) rec_stk.add(snake_case_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(snake_case_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 49
label: 1
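Rows with label 0 (mismatched styles) and label 1 (matched styles, as in every row shown here) can be separated with `filter`, again under the placeholder dataset id:

```python
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id

matched = ds.filter(lambda row: row["label"] == 1)
mismatched = ds.filter(lambda row: row["label"] == 0)
print(len(matched), len(mismatched))
```

Row 6

code: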
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[int] ): # This function is recursive __UpperCAmelCase = len(snake_case_ ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else __UpperCAmelCase = array[0] __UpperCAmelCase = False __UpperCAmelCase = 1 __UpperCAmelCase = [] while not is_found and i < array_length: if array[i] < pivot: __UpperCAmelCase = True __UpperCAmelCase = [element for element in array[i:] if element >= array[i]] __UpperCAmelCase = longest_subsequence(snake_case_ ) if len(snake_case_ ) > len(snake_case_ ): __UpperCAmelCase = temp_array else: i += 1 __UpperCAmelCase = [element for element in array[1:] if element >= pivot] __UpperCAmelCase = [pivot, *longest_subsequence(snake_case_ )] if len(snake_case_ ) > len(snake_case_ ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 49

style_context:
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ['PoolFormerFeatureExtractor'] _lowercase : Any = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
style_context_codestyle: 49
label: 1
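A style-match classifier would consume the two text cells as a pair. A hedged sketch with a generic Hugging Face tokenizer; the model choice is illustrative only, not something this dump prescribes:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/codebert-base")  # illustrative choice

row = {"code": "def f(x):\n    return x", "style_context": "def g(y):\n    return y", "label": 1}
# Encode code and style_context as a sentence pair, truncating long files.
enc = tokenizer(row["code"], row["style_context"], truncation=True, max_length=512)
print(len(enc["input_ids"]))
```

Row 7

code: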
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union _lowercase : int = TypeVar('T') _lowercase : str = Union[List[T], Tuple[T, ...]] _lowercase : Optional[Any] = Union[T, List[T], Dict[str, T]] _lowercase : List[Any] = Union[str, bytes, os.PathLike]
code_codestyle: 49

style_context:
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
style_context_codestyle: 49
label: 1

Row 8

code:
"""simple docstring""" _lowercase : Union[str, Any] = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', } def lowercase__ ( snake_case_ :float ): assert type(snake_case_ ) in (int, float) and decimal == int(snake_case_ ) __UpperCAmelCase = int(snake_case_ ) __UpperCAmelCase = '''''' __UpperCAmelCase = False if decimal < 0: __UpperCAmelCase = True decimal *= -1 while decimal > 0: __UpperCAmelCase , __UpperCAmelCase = divmod(snake_case_ , 16 ) __UpperCAmelCase = values[remainder] + hexadecimal __UpperCAmelCase = '''0x''' + hexadecimal if negative: __UpperCAmelCase = '''-''' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 49

style_context:
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "EncodecFeatureExtractor" a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ): super().__init__(_lowercase , _lowercase ) __UpperCAmelCase = self.feature_extractor __UpperCAmelCase = False def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase ) def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''text''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase ) if audio is not None: __UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: __UpperCAmelCase = audio_inputs['''padding_mask'''] return inputs def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(_lowercase , padding_mask=_lowercase ) else: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ): return self.tokenizer.decode(*_lowercase , **_lowercase ) def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ): __UpperCAmelCase = to_numpy(_lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape if padding_mask is None: return list(_lowercase ) __UpperCAmelCase = to_numpy(_lowercase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase = seq_len - padding_mask.shape[-1] __UpperCAmelCase = 1 - self.feature_extractor.padding_value __UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase ) __UpperCAmelCase = audio_values.tolist() for i in range(_lowercase ): __UpperCAmelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 ) return audio_values
49
1
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _lowercase : Optional[Any] = 'pt' elif is_tf_available(): _lowercase : int = 'tf' else: _lowercase : str = 'jax' class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Optional[Any] = ByTaTokenizer a__ : Optional[int] = False def a ( self : Tuple ): super().setUp() __UpperCAmelCase = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def a ( self : List[str] ): return ByTaTokenizer.from_pretrained('''google/byt5-small''' ) def a ( self : List[Any] , **_lowercase : List[Any] ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False , _lowercase : List[str]=20 , _lowercase : List[Any]=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for ByT5 because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __UpperCAmelCase = [] for i in range(len(_lowercase ) ): try: __UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) __UpperCAmelCase = list(filter(lambda _lowercase : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , _lowercase ) ) __UpperCAmelCase = list(filter(lambda _lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowercase ) , _lowercase ) ) if max_length is not None and len(_lowercase ) > max_length: __UpperCAmelCase = toks[:max_length] if min_length is not None and len(_lowercase ) < min_length and len(_lowercase ) > 0: while len(_lowercase ) < min_length: __UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] __UpperCAmelCase = [t[0] for t in toks] # Ensure consistency __UpperCAmelCase = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase ) if " " not in output_txt and len(_lowercase ) > 1: __UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowercase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowercase ) ) if with_prefix_space: __UpperCAmelCase = ''' ''' + output_txt __UpperCAmelCase = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) return output_txt, output_ids def a ( self : Any ): __UpperCAmelCase = self.ta_base_tokenizer __UpperCAmelCase = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] ) __UpperCAmelCase = tokenizer(['''hi''', '''I went to the gym''', ''''''] ) self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] ) def a ( self : Any ): __UpperCAmelCase = self.ta_base_tokenizer __UpperCAmelCase = '''Unicode €.''' __UpperCAmelCase = tokenizer(_lowercase ) __UpperCAmelCase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1] self.assertEqual(encoded['''input_ids'''] , _lowercase ) # decoding __UpperCAmelCase = tokenizer.decode(_lowercase ) self.assertEqual(_lowercase , '''Unicode €.</s>''' ) __UpperCAmelCase = tokenizer('''e è é ê ë''' ) __UpperCAmelCase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 
35, 1_98, 1_74, 1] self.assertEqual(encoded['''input_ids'''] , _lowercase ) # decoding __UpperCAmelCase = tokenizer.decode(_lowercase ) self.assertEqual(_lowercase , '''e è é ê ë</s>''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = self.ta_base_tokenizer __UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off __UpperCAmelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0] # fmt: on __UpperCAmelCase = tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) if FRAMEWORK != "jax": __UpperCAmelCase = list(batch.input_ids.numpy()[0] ) else: __UpperCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowercase , _lowercase ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def a ( self : Any ): __UpperCAmelCase = self.ta_base_tokenizer __UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __UpperCAmelCase = tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , _lowercase ) self.assertIn('''attention_mask''' , _lowercase ) self.assertNotIn('''decoder_input_ids''' , _lowercase ) self.assertNotIn('''decoder_attention_mask''' , _lowercase ) def a ( self : Tuple ): __UpperCAmelCase = self.ta_base_tokenizer __UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] __UpperCAmelCase = tokenizer( text_target=_lowercase , max_length=32 , padding='''max_length''' , truncation=_lowercase , return_tensors=_lowercase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def a ( self : List[Any] ): __UpperCAmelCase = self.ta_base_tokenizer __UpperCAmelCase = ['''A long paragraph for summarization. </s>'''] __UpperCAmelCase = ['''Summary of the text. 
</s>'''] # fmt: off __UpperCAmelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1] __UpperCAmelCase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1] # fmt: on __UpperCAmelCase = tokenizer(_lowercase , text_target=_lowercase ) self.assertEqual(_lowercase , batch['''input_ids'''][0] ) self.assertEqual(_lowercase , batch['''labels'''][0] ) def a ( self : Union[str, Any] ): # safety check on max_len default value so we are sure the test works __UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' __UpperCAmelCase = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer.__class__.from_pretrained(_lowercase ) __UpperCAmelCase = after_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) shutil.rmtree(_lowercase ) __UpperCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) __UpperCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) __UpperCAmelCase = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer.__class__.from_pretrained(_lowercase ) __UpperCAmelCase = after_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __UpperCAmelCase = tokenizer.__class__.from_pretrained(_lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_lowercase ) def a ( self : List[str] ): __UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowercase ) with open(os.path.join(_lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: __UpperCAmelCase = json.load(_lowercase ) with open(os.path.join(_lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: __UpperCAmelCase = json.load(_lowercase ) __UpperCAmelCase = [F'''<extra_id_{i}>''' for i in range(1_25 )] 
__UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] __UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(_lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_lowercase , _lowercase ) with open(os.path.join(_lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_lowercase , _lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __UpperCAmelCase = tokenizer_class.from_pretrained( _lowercase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_lowercase )] __UpperCAmelCase = tokenizer_class.from_pretrained( _lowercase , additional_special_tokens=_lowercase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def a ( self : Union[str, Any] ): __UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_class.from_pretrained(_lowercase ) self.assertTrue(tokenizer.decode([2_55] ) == '''''' ) def a ( self : Any ): pass def a ( self : List[Any] ): pass def a ( self : Tuple ): pass def a ( self : Union[str, Any] ): pass def a ( self : str ): # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings # and special added tokens as tokens __UpperCAmelCase = self.get_tokenizers(fast=_lowercase , do_lower_case=_lowercase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>'''] __UpperCAmelCase = tokenizer.convert_tokens_to_string(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) def a ( self : int ): __UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase = [ '''bos_token''', '''eos_token''', '''unk_token''', '''sep_token''', '''pad_token''', '''cls_token''', '''mask_token''', ] __UpperCAmelCase = 0 __UpperCAmelCase = tokenizer.convert_ids_to_tokens( _lowercase , skip_special_tokens=_lowercase ) for attr in attributes_list: setattr(_lowercase , attr + '''_id''' , _lowercase ) 
self.assertEqual(getattr(_lowercase , _lowercase ) , _lowercase ) self.assertEqual(getattr(_lowercase , attr + '''_id''' ) , _lowercase ) setattr(_lowercase , attr + '''_id''' , _lowercase ) self.assertEqual(getattr(_lowercase , _lowercase ) , _lowercase ) self.assertEqual(getattr(_lowercase , attr + '''_id''' ) , _lowercase ) setattr(_lowercase , '''additional_special_tokens_ids''' , [] ) self.assertListEqual(getattr(_lowercase , '''additional_special_tokens''' ) , [] ) self.assertListEqual(getattr(_lowercase , '''additional_special_tokens_ids''' ) , [] ) setattr(_lowercase , '''additional_special_tokens_ids''' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_lowercase , '''additional_special_tokens''' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_lowercase , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
49
"""simple docstring""" def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __UpperCAmelCase = True for i in range(snake_case_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __UpperCAmelCase = True if a[i].islower(): __UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
49
1
"""simple docstring""" def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :int ): if principal <= 0: raise Exception('''Principal borrowed must be > 0''' ) if rate_per_annum < 0: raise Exception('''Rate of interest must be >= 0''' ) if years_to_repay <= 0 or not isinstance(snake_case_ , snake_case_ ): raise Exception('''Years to repay must be an integer > 0''' ) # Yearly rate is divided by 12 to get monthly rate __UpperCAmelCase = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly __UpperCAmelCase = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
49
"""simple docstring""" from collections import deque class _UpperCAmelCase : def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ): __UpperCAmelCase = process_name # process name __UpperCAmelCase = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __UpperCAmelCase = arrival_time __UpperCAmelCase = burst_time # remaining burst time __UpperCAmelCase = 0 # total time of the process wait in ready queue __UpperCAmelCase = 0 # time from arrival time to completion time class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ): # total number of mlfq's queues __UpperCAmelCase = number_of_queues # time slice of queues that round robin algorithm applied __UpperCAmelCase = time_slices # unfinished process is in this ready_queue __UpperCAmelCase = queue # current time __UpperCAmelCase = current_time # finished process is in this sequence queue __UpperCAmelCase = deque() def a ( self : Dict ): __UpperCAmelCase = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a ( self : str , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a ( self : Any , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a ( self : Tuple , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a ( self : Optional[int] , _lowercase : deque[Process] ): return [q.burst_time for q in queue] def a ( self : str , _lowercase : Process ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a ( self : Union[str, Any] , _lowercase : deque[Process] ): __UpperCAmelCase = deque() # sequence deque of finished process while len(_lowercase ) != 0: __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_lowercase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __UpperCAmelCase = 0 # set the process's turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # set the completion time __UpperCAmelCase = self.current_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ): __UpperCAmelCase = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_lowercase ) ): __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_lowercase ) # if the burst time of process is bigger than 
time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __UpperCAmelCase = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_lowercase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __UpperCAmelCase = 0 # set the finish time __UpperCAmelCase = self.current_time # update the process' turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a ( self : Union[str, Any] ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __UpperCAmelCase , __UpperCAmelCase = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _lowercase : List[str] = Process('P1', 0, 53) _lowercase : str = Process('P2', 0, 17) _lowercase : Union[str, Any] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : Any = 3 _lowercase : Union[str, Any] = [17, 25] _lowercase : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) _lowercase : Optional[Any] = Process('P1', 0, 53) _lowercase : Tuple = Process('P2', 0, 17) _lowercase : Optional[int] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : int = 3 _lowercase : int = [17, 25] _lowercase : List[str] = deque([Pa, Pa, Pa, Pa]) _lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0) _lowercase : str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( f"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( f"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
49
1
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self : List[Any] , _lowercase : List[Any] , _lowercase : Optional[int]=3 , _lowercase : str=32 , _lowercase : List[str]=3 , _lowercase : str=10 , _lowercase : Optional[int]=[10, 20, 30, 40] , _lowercase : Optional[Any]=[1, 1, 2, 1] , _lowercase : Optional[Any]=True , _lowercase : List[str]=True , _lowercase : Union[str, Any]="relu" , _lowercase : List[Any]=3 , _lowercase : Tuple=None , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = image_size __UpperCAmelCase = num_channels __UpperCAmelCase = embeddings_size __UpperCAmelCase = hidden_sizes __UpperCAmelCase = depths __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = hidden_act __UpperCAmelCase = num_labels __UpperCAmelCase = scope __UpperCAmelCase = len(_lowercase ) def a ( self : Any ): __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def a ( self : List[str] ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a ( self : Optional[int] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : List[Any] ): __UpperCAmelCase = TFResNetModel(config=_lowercase ) __UpperCAmelCase = model(_lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a ( self : Any , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFResNetForImageClassification(_lowercase ) __UpperCAmelCase = model(_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self : int ): __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : str = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () a__ : Optional[Any] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) a__ : str = False a__ : int = False a__ : Union[str, 
Any] = False a__ : Optional[int] = False a__ : List[str] = False def a ( self : str ): __UpperCAmelCase = TFResNetModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase ) def a ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a ( self : int ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def a ( self : Union[str, Any] ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def a ( self : Tuple ): pass def a ( self : Any ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) __UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowercase ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def a ( self : Optional[Any] ): def check_hidden_states_output(_lowercase : str , _lowercase : Optional[Any] , _lowercase : int ): __UpperCAmelCase = model_class(_lowercase ) __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase = self.model_tester.num_stages self.assertEqual(len(_lowercase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __UpperCAmelCase = layer_type __UpperCAmelCase = True check_hidden_states_output(_lowercase , _lowercase , _lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True check_hidden_states_output(_lowercase , _lowercase , _lowercase ) def a ( self : int ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowercase ) @slow def a ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = TFResNetModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) def lowercase__ ( ): __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def a ( self : Optional[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a ( self : Tuple ): __UpperCAmelCase = 
TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __UpperCAmelCase = self.default_image_processor __UpperCAmelCase = prepare_img() __UpperCAmelCase = image_processor(images=_lowercase , return_tensors='''tf''' ) # forward pass __UpperCAmelCase = model(**_lowercase ) # verify the logits __UpperCAmelCase = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , _lowercase ) __UpperCAmelCase = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowercase , atol=1E-4 ) )
49
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : List[Any] = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "camembert" def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = use_cache __UpperCAmelCase = classifier_dropout class _UpperCAmelCase ( _lowerCAmelCase ): @property def a ( self : Tuple ): if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
49
1
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase__ ( snake_case_ :np.ndarray ): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase__ ( snake_case_ :np.ndarray ): return (gray > 127) & (gray <= 255) def lowercase__ ( snake_case_ :np.ndarray , snake_case_ :np.ndarray ): __UpperCAmelCase = np.zeros_like(snake_case_ ) __UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image _lowercase : Optional[Any] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' _lowercase : Any = np.array(Image.open(lena_path)) # kernel to be applied _lowercase : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) _lowercase : Dict = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image _lowercase : int = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
49
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks if the entire collection has been sorted if len(snake_case_ ) <= 1 or n <= 1: return insert_next(snake_case_ , n - 1 ) rec_insertion_sort(snake_case_ , n - 1 ) def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks order between adjacent elements if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase = ( collection[index], collection[index - 1], ) insert_next(snake_case_ , index + 1 ) if __name__ == "__main__": _lowercase : Any = input('Enter integers separated by spaces: ') _lowercase : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
49
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : int = logging.get_logger(__name__) _lowercase : Optional[Any] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "donut-swin" a__ : str = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Dict , _lowercase : Union[str, Any]=2_24 , _lowercase : List[Any]=4 , _lowercase : Tuple=3 , _lowercase : Union[str, Any]=96 , _lowercase : Dict=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : Union[str, Any]=7 , _lowercase : List[str]=4.0 , _lowercase : List[Any]=True , _lowercase : Dict=0.0 , _lowercase : str=0.0 , _lowercase : Union[str, Any]=0.1 , _lowercase : Optional[Any]="gelu" , _lowercase : Optional[Any]=False , _lowercase : List[str]=0.02 , _lowercase : Union[str, Any]=1E-5 , **_lowercase : int , ): super().__init__(**_lowercase ) __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = len(_lowercase ) __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
49
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : Any = StableUnCLIPPipeline a__ : Dict = TEXT_TO_IMAGE_PARAMS a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ : Optional[int] = False def a ( self : List[str] ): __UpperCAmelCase = 32 __UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase ) __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL() __UpperCAmelCase = { # prior 
components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ): if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def a ( self : Any ): __UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=_lowercase ) def a ( self : int ): __UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowercase ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Any ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' ) __UpperCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase ) def a ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
49
1
"""simple docstring""" from collections import defaultdict from math import gcd def lowercase__ ( snake_case_ :int = 1_500_000 ): __UpperCAmelCase = defaultdict(snake_case_ ) __UpperCAmelCase = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , snake_case_ , 2 ): if gcd(snake_case_ , snake_case_ ) > 1: continue __UpperCAmelCase = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(snake_case_ , limit + 1 , snake_case_ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f"""{solution() = }""")
49
"""simple docstring""" from typing import Any def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ): _validation( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Creates data structures and fill initial step __UpperCAmelCase = {} __UpperCAmelCase = {} for state in states_space: __UpperCAmelCase = observations_space[0] __UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case_ ) ): __UpperCAmelCase = observations_space[o] __UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state # Update probabilities and pointers dicts __UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __UpperCAmelCase = arg_max # The final observation __UpperCAmelCase = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state __UpperCAmelCase = arg_max # Process pointers backwards __UpperCAmelCase = last_state __UpperCAmelCase = [] for o in range(len(snake_case_ ) - 1 , -1 , -1 ): result.append(snake_case_ ) __UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_not_empty( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) _validate_lists(snake_case_ , snake_case_ ) _validate_dicts( snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any ): _validate_list(snake_case_ , '''observations_space''' ) _validate_list(snake_case_ , '''states_space''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list''' raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list of strings''' raise ValueError(snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ ) _validate_nested_dict(snake_case_ , '''transition_probabilities''' ) _validate_nested_dict(snake_case_ , '''emission_probabilities''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): _validate_dict(_object , snake_case_ , snake_case_ ) for x in 
_object.values(): _validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a dict''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ): __UpperCAmelCase = F'''{var_name} all keys must be strings''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ): __UpperCAmelCase = '''nested dictionary ''' if nested else '''''' __UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
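# Runnable usage sketch of `viterbi` above, using the classic healthy/fever HMM
# (standard textbook numbers, included purely for illustration):
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, initial, transition, emission))
# ['Healthy', 'Healthy', 'Fever']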
49
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor _lowercase : Any = logging.get_logger(__name__) class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Tuple , *_lowercase : Optional[int] , **_lowercase : Union[str, Any] ): warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , _lowercase , ) super().__init__(*_lowercase , **_lowercase )
49
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowercase : int = logging.get_logger(__name__) _lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _lowercase : str = { 'vocab_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json' ), }, } _lowercase : int = { 'yjernite/retribert-base-uncased': 5_12, } _lowercase : Any = { 'yjernite/retribert-base-uncased': {'do_lower_case': True}, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = PRETRAINED_INIT_CONFIGURATION a__ : Optional[Any] = RetriBertTokenizer a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ): super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars ): __UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = strip_accents __UpperCAmelCase = tokenize_chinese_chars __UpperCAmelCase = normalizer_class(**_lowercase ) __UpperCAmelCase = do_lower_case def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ): __UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase )
49
1
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex _lowercase : int = logging.getLogger(__name__) class _UpperCAmelCase : def __init__( self : Tuple ): __UpperCAmelCase = False def a ( self : int , _lowercase : Tuple , _lowercase : Any , _lowercase : List[Any] , _lowercase : Tuple ): if not self.initialized: __UpperCAmelCase = RagRetriever( _lowercase , question_encoder_tokenizer=_lowercase , generator_tokenizer=_lowercase , index=_lowercase , init_retrieval=_lowercase , ) __UpperCAmelCase = True def a ( self : Optional[int] ): self.retriever.index.init_index() def a ( self : Any , _lowercase : Any , _lowercase : Optional[Any] ): __UpperCAmelCase , __UpperCAmelCase = self.retriever._main_retrieve(_lowercase , _lowercase ) return doc_ids, retrieved_doc_embeds class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : List[Any] , _lowercase : Dict , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any]=None ): if index is not None and index.is_initialized() and len(_lowercase ) > 0: raise ValueError( '''When using Ray for distributed fine-tuning, ''' '''you\'ll need to provide the paths instead, ''' '''as the dataset and the index are loaded ''' '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' ) super().__init__( _lowercase , question_encoder_tokenizer=_lowercase , generator_tokenizer=_lowercase , index=_lowercase , init_retrieval=_lowercase , ) __UpperCAmelCase = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_lowercase , _lowercase , _lowercase , _lowercase ) for worker in self.retrieval_workers ] ) def a ( self : List[str] ): logger.info('''initializing retrieval''' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def a ( self : int , _lowercase : str , _lowercase : Tuple ): if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
__UpperCAmelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] __UpperCAmelCase , __UpperCAmelCase = ray.get(random_worker.retrieve.remote(_lowercase , _lowercase ) ) else: __UpperCAmelCase , __UpperCAmelCase = self._main_retrieve(_lowercase , _lowercase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowercase ) @classmethod def a ( cls : Union[str, Any] , _lowercase : int , _lowercase : List[Any]=None , **_lowercase : str ): return super(_lowercase , cls ).get_tokenizers(_lowercase , _lowercase , **_lowercase ) @classmethod def a ( cls : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Any=None , **_lowercase : Union[str, Any] ): __UpperCAmelCase = kwargs.pop('''config''' , _lowercase ) or RagConfig.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = RagTokenizer.from_pretrained(_lowercase , config=_lowercase ) __UpperCAmelCase = rag_tokenizer.question_encoder __UpperCAmelCase = rag_tokenizer.generator if indexed_dataset is not None: __UpperCAmelCase = '''custom''' __UpperCAmelCase = CustomHFIndex(config.retrieval_vector_size , _lowercase ) else: __UpperCAmelCase = cls._build_index(_lowercase ) return cls( _lowercase , question_encoder_tokenizer=_lowercase , generator_tokenizer=_lowercase , retrieval_workers=_lowercase , index=_lowercase , )
49
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _lowercase : Dict = 'bart' _lowercase : Dict = True @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __UpperCAmelCase = qar_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = (None, None) if MODEL_TYPE == "bart": __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __UpperCAmelCase = sas_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = faiss.StandardGpuResources() __UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __UpperCAmelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) __UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ ) wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU else: __UpperCAmelCase , __UpperCAmelCase = (None, None) __UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): __UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __UpperCAmelCase = elia['''train_eli5'''] __UpperCAmelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(snake_case_ ) return (elia_train, eli5_train_q_index) _lowercase ,_lowercase ,_lowercase : Dict = load_indexes() _lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models() _lowercase ,_lowercase : Tuple = load_train_data() def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ): __UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ ) __UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]] return nn_examples def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ): if source == "none": __UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index( snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ ) else: __UpperCAmelCase , __UpperCAmelCase = query_es_index( snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , ) __UpperCAmelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda snake_case_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None), } ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ): with torch.no_grad(): __UpperCAmelCase = qa_sas_generate( snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar _lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' _lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) _lowercase : str = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] _lowercase : Optional[int] = st.sidebar.checkbox('Demo options') if demo_options: _lowercase : Tuple = st.sidebar.selectbox( '', action_list, index=3, ) _lowercase : List[str] = action_list.index(action_st) _lowercase : str = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) _lowercase : int = show_type == 'Show full text of passages' else: _lowercase : str = 3 _lowercase : List[Any] = True _lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options') if retrieval_options: _lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) _lowercase : 
Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: _lowercase : List[str] = 'wiki40b' _lowercase : Optional[int] = 'dense' _lowercase : List[Any] = 'beam' _lowercase : str = 2 _lowercase : Optional[int] = 64 _lowercase : Union[str, Any] = 2_56 _lowercase : List[str] = None _lowercase : Optional[int] = None _lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options') if generate_options: _lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) _lowercase : Optional[int] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) _lowercase : Optional[Any] = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": _lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _lowercase : List[Any] = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) _lowercase : Dict = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) _lowercase : Union[str, Any] = None # start main text _lowercase : Optional[int] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] _lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": _lowercase : Optional[Any] = st.text_input('Enter your question here:', '') else: _lowercase : int = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": _lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10) _lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) _lowercase : Dict = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _lowercase : Any = support_list[:10] _lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: _lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: _lowercase ,_lowercase : Union[str, Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): _lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) _lowercase : Any = res[1].strip() if sec_titles == "": _lowercase : Dict = '[{}]({})'.format(res[0], wiki_url) else: _lowercase : List[Any] = sec_titles.split(' & ') _lowercase : int = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: _lowercase : List[Any] = find_nearest_training(question) _lowercase : Tuple = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) _lowercase : int = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) _lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
49
1
"""simple docstring""" def lowercase__ ( snake_case_ :int = 100 ): __UpperCAmelCase = set() __UpperCAmelCase = 0 __UpperCAmelCase = n + 1 # maximum limit for a in range(2 , snake_case_ ): for b in range(2 , snake_case_ ): __UpperCAmelCase = a**b # calculates the current power collect_powers.add(snake_case_ ) # adds the result to the set return len(snake_case_ ) if __name__ == "__main__": print('Number of terms ', solution(int(str(input()).strip())))
49
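# The set above is what absorbs duplicate powers such as 2**4 == 4**2 == 16.
# A minimal self-contained sketch of the same idea with readable names (the
# helper below is illustrative, not part of the original file); Project Euler
# problem 29 states that the a, b <= 5 case has exactly 15 distinct terms.
def distinct_powers(n: int = 100) -> int:
    # a**b for 2 <= a <= n and 2 <= b <= n; the set discards collisions.
    return len({a**b for a in range(2, n + 1) for b in range(2, n + 1)})


assert distinct_powers(5) == 15  # sanity check against the problem statement
print(distinct_powers())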
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[str] = CycleDiffusionPipeline a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def a ( self : Optional[int] ): torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __UpperCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __UpperCAmelCase = CLIPTextModel(_lowercase ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ): __UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image / 2 + 0.5 if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def a ( self : Optional[int] ): __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = 
CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_dummy_components() for name, module in components.items(): if hasattr(_lowercase , '''half''' ): __UpperCAmelCase = module.half() __UpperCAmelCase = CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def a ( self : Tuple ): return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def a ( self : List[str] ): return super().test_inference_batch_single_identical() @skip_mps def a ( self : int ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def a ( self : str ): return super().test_save_load_optional_components() @skip_mps def a ( self : int ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : int ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained( _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def a ( self : Optional[Any] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images assert np.abs(image - expected_image ).max() < 2E-2
49
1
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : Optional[Any]=None ): __UpperCAmelCase = data __UpperCAmelCase = None def __repr__( self : Optional[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = self while temp: string_rep.append(F'''{temp.data}''' ) __UpperCAmelCase = temp.next return "->".join(_lowercase ) def lowercase__ ( snake_case_ :list ): if not elements_list: raise Exception('''The Elements List is empty''' ) __UpperCAmelCase = __UpperCAmelCase = Node(elements_list[0] ) for i in range(1 , len(snake_case_ ) ): __UpperCAmelCase = Node(elements_list[i] ) __UpperCAmelCase = current.next return head def lowercase__ ( snake_case_ :Node ): if head_node is not None and isinstance(snake_case_ , snake_case_ ): print_reverse(head_node.next ) print(head_node.data ) def lowercase__ ( ): from doctest import testmod testmod() __UpperCAmelCase = make_linked_list([14, 52, 14, 12, 43] ) print('''Linked List:''' ) print(snake_case_ ) print('''Elements in Reverse:''' ) print_reverse(snake_case_ ) if __name__ == "__main__": main()
49
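# print_reverse above recurses once per node, so a list longer than Python's
# default recursion limit (about 1000 frames) would raise RecursionError. A
# minimal iterative sketch with the same output, assuming nodes expose the same
# .data / .next attributes as the class above:
def print_reverse_iterative(head) -> None:
    stack = []
    node = head
    while node is not None:  # one pass to stack the payloads
        stack.append(node.data)
        node = node.next
    while stack:  # emit last-in, first-out
        print(stack.pop())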
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'} _lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _lowercase : List[str] = { 'google/rembert': 2_56, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Union[str, Any] = VOCAB_FILES_NAMES a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ): super().__init__( do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = remove_space __UpperCAmelCase = keep_accents __UpperCAmelCase = vocab_file __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(_lowercase ) @property def a ( self : int ): return len(self.sp_model ) def a ( self : Tuple ): __UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): __UpperCAmelCase = self.__dict__.copy() __UpperCAmelCase = None return state def __setstate__( self : Tuple , _lowercase : str ): __UpperCAmelCase = d __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ): __UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase ) return pieces def a ( self : int , _lowercase : List[str] ): return self.sp_model.PieceToId(_lowercase ) def a ( self : List[str] , _lowercase : str ): return self.sp_model.IdToPiece(_lowercase ) def a ( self : Any , _lowercase : Dict ): __UpperCAmelCase = self.sp_model.decode_pieces(_lowercase ) return out_string def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1] def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] 
__UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not os.path.isdir(_lowercase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) ) return __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ): copyfile(self.vocab_file , _lowercase ) return (out_vocab_file,)
49
1
"""simple docstring""" import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = "" a__ : str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) a__ : str = None # compression type in fsspec. ex: "gzip" a__ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : Optional[int] , _lowercase : str = "" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , **_lowercase : Dict ): super().__init__(self , **_lowercase ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode __UpperCAmelCase = fsspec.open( _lowercase , mode='''rb''' , protocol=_lowercase , compression=self.compression , client_kwargs={ '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459 '''trust_env''': True, # Enable reading proxy env variables. **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) __UpperCAmelCase = os.path.basename(self.file.path.split('''::''' )[0] ) __UpperCAmelCase = ( self.compressed_name[: self.compressed_name.rindex('''.''' )] if '''.''' in self.compressed_name else self.compressed_name ) __UpperCAmelCase = None @classmethod def a ( cls : Tuple , _lowercase : Any ): # compressed file paths are always relative to the archive root return super()._strip_protocol(_lowercase ).lstrip('''/''' ) def a ( self : Dict ): if self.dir_cache is None: __UpperCAmelCase = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name} __UpperCAmelCase = {f['''name''']: f} def a ( self : Optional[int] , _lowercase : str ): return self.file.open().read() def a ( self : Optional[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[Any]=None , _lowercase : Any=True , _lowercase : Tuple=None , **_lowercase : Optional[Any] , ): __UpperCAmelCase = self._strip_protocol(_lowercase ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "bz2" a__ : List[str] = "bz2" a__ : List[str] = ".bz2" class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "gzip" a__ : Tuple = "gzip" a__ : Optional[int] = ".gz" class _UpperCAmelCase ( _lowerCAmelCase ): a__ : int = "lz4" a__ : List[str] = "lz4" a__ : Union[str, Any] = ".lz4" class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Union[str, Any] = "xz" a__ : Optional[Any] = "xz" a__ : List[str] = ".xz" class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Optional[Any] = "zstd" a__ : Any = "zstd" a__ : Union[str, Any] = ".zst" def __init__( self : Optional[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , _lowercase : int = DEFAULT_BLOCK_SIZE , **_lowercase : Any , ): super().__init__( fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 
'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 __UpperCAmelCase = self.file.__enter__ class _UpperCAmelCase : def __init__( self : int , _lowercase : Optional[int] ): __UpperCAmelCase = file_ def __enter__( self : Tuple ): self._file.__enter__() return self def __exit__( self : Union[str, Any] , *_lowercase : int , **_lowercase : int ): self._file.__exit__(*_lowercase , **_lowercase ) def __iter__( self : Optional[Any] ): return iter(self._file ) def a ( self : int ): return next(self._file ) def __getattr__( self : Dict , _lowercase : Union[str, Any] ): return getattr(self._file , _lowercase ) def fixed_enter(*_lowercase : Union[str, Any] , **_lowercase : Optional[Any] ): return WrappedFile(_enter(*_lowercase , **_lowercase ) ) __UpperCAmelCase = fixed_enter
49
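# The classes above register compression codecs (bz2, gzip, lz4, xz, zstd) as
# single-file fsspec filesystems. For comparison, a minimal sketch of what stock
# fsspec already offers through its `compression` argument; the path below is
# hypothetical, and "infer" keys the codec off the file extension.
import fsspec

with fsspec.open("data.txt.gz", mode="rt", compression="infer") as f:
    print(f.read())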
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : List[Any] = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
1
"""simple docstring""" def lowercase__ ( snake_case_ :int ): __UpperCAmelCase = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__ ( snake_case_ :int = 5_000 ): __UpperCAmelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case_ )] for i, pentagonal_i in enumerate(snake_case_ ): for j in range(snake_case_ , len(snake_case_ ) ): __UpperCAmelCase = pentagonal_nums[j] __UpperCAmelCase = pentagonal_i + pentagonal_j __UpperCAmelCase = pentagonal_j - pentagonal_i if is_pentagonal(snake_case_ ) and is_pentagonal(snake_case_ ): return b return -1 if __name__ == "__main__": print(f"""{solution() = }""")
49
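# The inverse test above follows from P(n) = n * (3n - 1) / 2: solving
# 3n**2 - n - 2P = 0 for n gives n = (1 + sqrt(1 + 24P)) / 6, so P is pentagonal
# exactly when that root is a whole number. A minimal sketch with readable names
# (illustrative helpers, not part of the original file):
def pentagonal(n: int) -> int:
    return n * (3 * n - 1) // 2


def is_pentagonal(number: int) -> bool:
    root = (1 + 24 * number) ** 0.5  # sqrt(1 + 24P) equals 6n - 1 for pentagonal P
    return ((1 + root) / 6) % 1 == 0


# The inverse test accepts exactly the generated terms.
assert all(is_pentagonal(pentagonal(n)) for n in range(1, 100))
assert not is_pentagonal(pentagonal(10) + 1)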
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _lowercase : List[Any] = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase__ ( snake_case_ :Union[str, Any] ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase__ ( snake_case_ :int , snake_case_ :Dict ): if args.student_type == "roberta": __UpperCAmelCase = False elif args.student_type == "gpt2": __UpperCAmelCase = False def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ): if args.student_type == "roberta": __UpperCAmelCase = False def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax 
temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' ) __UpperCAmelCase = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite''' ''' it. Use `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __UpperCAmelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ ) __UpperCAmelCase = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) __UpperCAmelCase = special_tok_ids __UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with
open(args.token_counts , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) __UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __UpperCAmelCase = 0.0 # do not predict special tokens __UpperCAmelCase = torch.from_numpy(snake_case_ ) else: __UpperCAmelCase = None __UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) __UpperCAmelCase = student_config_class.from_pretrained(args.student_config ) __UpperCAmelCase = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: __UpperCAmelCase = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # __UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __UpperCAmelCase = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
49
1
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , _lowerCAmelCase , ) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = RobertaConfig a__ : str = "roberta" def __init__( self : List[str] , _lowercase : List[Any] ): super().__init__(_lowercase ) __UpperCAmelCase = RobertaEmbeddings(_lowercase ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , _lowerCAmelCase , ) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Optional[int] = RobertaConfig a__ : Optional[int] = "roberta" def __init__( self : List[Any] , _lowercase : Union[str, Any] ): super().__init__(_lowercase ) __UpperCAmelCase = config.num_labels __UpperCAmelCase = config.num_hidden_layers __UpperCAmelCase = DeeRobertaModel(_lowercase ) __UpperCAmelCase = nn.Dropout(config.hidden_dropout_prob ) __UpperCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_lowercase ) def a ( self : str , _lowercase : Tuple=None , _lowercase : Dict=None , _lowercase : Dict=None , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Union[str, Any]=-1 , _lowercase : int=False , ): __UpperCAmelCase = self.num_layers try: __UpperCAmelCase = self.roberta( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , ) __UpperCAmelCase = outputs[1] __UpperCAmelCase = self.dropout(_lowercase ) __UpperCAmelCase = self.classifier(_lowercase ) __UpperCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCAmelCase = e.message __UpperCAmelCase = e.exit_layer __UpperCAmelCase = outputs[0] if not self.training: __UpperCAmelCase = entropy(_lowercase ) __UpperCAmelCase = [] __UpperCAmelCase = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCAmelCase = MSELoss() __UpperCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCAmelCase = CrossEntropyLoss() __UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits __UpperCAmelCase = [] for highway_exit in outputs[-1]: __UpperCAmelCase = highway_exit[0] if not self.training: highway_logits_all.append(_lowercase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCAmelCase = MSELoss() __UpperCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __UpperCAmelCase = CrossEntropyLoss() __UpperCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_lowercase ) if train_highway: __UpperCAmelCase = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCAmelCase = (loss,) + outputs if not self.training: 
__UpperCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCAmelCase = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
49
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
1
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list[int] , snake_case_ :int ): if len(snake_case_ ) == 0: return False __UpperCAmelCase = len(snake_case_ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case_ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case_ ) if __name__ == "__main__": _lowercase : str = input('Enter numbers separated by comma:\n').strip() _lowercase : int = [int(item.strip()) for item in user_input.split(',')] _lowercase : str = int(input('Enter the number to be found in the list:\n').strip()) _lowercase : Union[str, Any] = '' if binary_search(sequence, target) else 'not ' print(f"""{target} was {not_str}found in {sequence}""")
49
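# The recursive search above slices the list on every call, which copies O(n)
# elements per level; a minimal iterative two-pointer sketch over the same
# sorted-input contract avoids the copies and runs in O(log n) time, O(1) space:
def binary_search_iterative(a_list, item) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1  # continue in the left half
        else:
            low = midpoint + 1  # continue in the right half
    return False


assert binary_search_iterative([1, 3, 5, 7, 9], 7)
assert not binary_search_iterative([1, 3, 5, 7, 9], 4)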
"""simple docstring""" import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _lowercase : Union[str, Any] = logging.getLogger(__name__) _lowercase : Optional[Any] = 'Hello world! cécé herlolip' _lowercase : str = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def lowercase__ ( snake_case_ :Any , snake_case_ :int ): __UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) __UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage ) __UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ ) original.eval() __UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) __UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs __UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) __UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __UpperCAmelCase = encoder_input_ids __UpperCAmelCase = decoder_input_ids __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical __UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = original.generator(snake_case_ ) __UpperCAmelCase = new_model( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = new_model.generator(snake_case_ ) __UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) _lowercase : List[str] = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
49
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[Any] ): __UpperCAmelCase = tempfile.mkdtemp() # fmt: off __UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __UpperCAmelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } __UpperCAmelCase = os.path.join(self.tmpdirname , _lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowercase , _lowercase ) def a ( self : Optional[int] , **_lowercase : List[str] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Optional[int] , **_lowercase : List[str] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Dict ): shutil.rmtree(self.tmpdirname ) def a ( self : str ): __UpperCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __UpperCAmelCase = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self : Optional[int] ): __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowercase ) def a ( self : int ): __UpperCAmelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __UpperCAmelCase = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 ) __UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) 
self.assertIsInstance(processor.image_processor , _lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase ) __UpperCAmelCase = self.prepare_image_inputs() __UpperCAmelCase = image_processor(_lowercase , return_tensors='''np''' ) __UpperCAmelCase = processor(images=_lowercase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a ( self : List[str] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = processor(text=_lowercase ) __UpperCAmelCase = tokenizer(_lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self : Optional[Any] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = self.prepare_image_inputs() __UpperCAmelCase = processor(text=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_lowercase ): processor() def a ( self : Optional[Any] ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase ) __UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __UpperCAmelCase = processor.batch_decode(_lowercase ) __UpperCAmelCase = tokenizer.batch_decode(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) def a ( self : Dict ): __UpperCAmelCase = self.get_image_processor() __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = self.prepare_image_inputs() __UpperCAmelCase = processor(text=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
49
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def a ( self : List[str] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self : Dict ): __UpperCAmelCase = ort.SessionOptions() __UpperCAmelCase = False return options def a ( self : Any ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a ( self : Optional[int] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-3
49
1
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :int | float | str , snake_case_ :int | float | str ): if nth_term == "": return [""] __UpperCAmelCase = int(snake_case_ ) __UpperCAmelCase = int(snake_case_ ) __UpperCAmelCase = [] for temp in range(int(snake_case_ ) ): series.append(F'''1 / {pow(temp + 1 , int(snake_case_ ) )}''' if series else '''1''' ) return series if __name__ == "__main__": import doctest doctest.testmod() _lowercase : Optional[int] = int(input('Enter the last number (nth term) of the P-Series')) _lowercase : Dict = int(input('Enter the power for P-Series')) print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p') print(p_series(nth_term, power))
49
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__ ( snake_case_ :Dict , snake_case_ :int ): assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} __UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} __UpperCAmelCase = features.copy() __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} 
) if features is not None else None ) __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ): if issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = jsonl_path elif issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = [jsonl_path] __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ): assert isinstance(snake_case_ , snake_case_ ) for split in splits: __UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read() 
_check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ): if split: __UpperCAmelCase = {split: jsonl_path} else: __UpperCAmelCase = '''train''' __UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path} __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__ ( snake_case_ :Optional[int] ): return json.load(snake_case_ ) def lowercase__ ( snake_case_ :Any ): return [json.loads(line ) for line in snake_case_] class _UpperCAmelCase : @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''},
'''data'''), ] , ) def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 def a ( self : int , _lowercase : Any ): with pytest.raises(_lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ): __UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' __UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() assert exported_content == original_content
49
1
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs _lowercase : Union[str, Any] = imread(r'digital_image_processing/image_data/lena_small.jpg') _lowercase : Any = cvtColor(img, COLOR_BGR2GRAY) def lowercase__ ( ): __UpperCAmelCase = cn.convert_to_negative(snake_case_ ) # assert negative_img array for at least one True assert negative_img.any() def lowercase__ ( ): with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(snake_case_ , 110 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def lowercase__ ( ): __UpperCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowercase__ ( ): __UpperCAmelCase = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __UpperCAmelCase = canny.canny(snake_case_ ) # assert canny array for at least one True assert canny_array.any() def lowercase__ ( ): assert gg.gaussian_filter(snake_case_ , 5 , sigma=0.9 ).all() def lowercase__ ( ): # laplace diagonals __UpperCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __UpperCAmelCase = conv.img_convolve(snake_case_ , snake_case_ ).astype(snake_case_ ) assert res.any() def lowercase__ ( ): assert med.median_filter(snake_case_ , 3 ).any() def lowercase__ ( ): __UpperCAmelCase , __UpperCAmelCase = sob.sobel_filter(snake_case_ ) assert grad.any() and theta.any() def lowercase__ ( ): __UpperCAmelCase = sp.make_sepia(snake_case_ , 20 ) assert sepia.all() def lowercase__ ( snake_case_ :str = "digital_image_processing/image_data/lena_small.jpg" ): __UpperCAmelCase = bs.Burkes(imread(snake_case_ , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowercase__ ( snake_case_ :str = "digital_image_processing/image_data/lena_small.jpg" , ): __UpperCAmelCase = rs.NearestNeighbour(imread(snake_case_ , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowercase__ ( ): __UpperCAmelCase = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. __UpperCAmelCase = imread(snake_case_ , 0 ) # Test for get_neighbors_pixel function() return not None __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = image[x_coordinate][y_coordinate] __UpperCAmelCase = lbp.get_neighbors_pixel( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __UpperCAmelCase = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. 
for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __UpperCAmelCase = lbp.local_binary_value(snake_case_ , snake_case_ , snake_case_ ) assert lbp_image.any()
49
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase = TextIteratorStreamer(_lowercase ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowercase , _lowercase ) def a ( self : str ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Tuple ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase ) model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase = cs.out[:-1] # Remove the final "\n" __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : Tuple ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowercase ): __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text
49
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : int = KandinskyImgaImgPipeline a__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image"] a__ : Optional[Any] = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] a__ : Tuple = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] a__ : Optional[Any] = False @property def a ( self : Tuple ): return 32 @property def a ( self : int ): return 32 @property def a ( self : Optional[Any] ): return self.time_input_dim @property def a ( self : Any ): return self.time_input_dim * 4 @property def a ( self : Union[str, Any] ): return 1_00 @property def a ( self : List[Any] ): __UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def a ( self : Union[str, Any] ): torch.manual_seed(0 ) __UpperCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __UpperCAmelCase = MultilingualCLIP(_lowercase ) __UpperCAmelCase = text_encoder.eval() return text_encoder @property def a ( self : Any ): torch.manual_seed(0 ) __UpperCAmelCase = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __UpperCAmelCase = UNetaDConditionModel(**_lowercase ) return model @property def a ( self : List[str] ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self : Tuple ): torch.manual_seed(0 ) __UpperCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def a ( self : Optional[int] ): __UpperCAmelCase = self.dummy_text_encoder 
__UpperCAmelCase = self.dummy_tokenizer __UpperCAmelCase = self.dummy_unet __UpperCAmelCase = self.dummy_movq __UpperCAmelCase = { '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00_085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } __UpperCAmelCase = DDIMScheduler(**_lowercase ) __UpperCAmelCase = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : List[str]=0 ): __UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase ) # create init_image __UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def a ( self : List[Any] ): __UpperCAmelCase = '''cpu''' __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = self.pipeline_class(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) ) __UpperCAmelCase = output.images __UpperCAmelCase = pipe( **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0] __UpperCAmelCase = image[0, -3:, -3:, -1] __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase = np.array( [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : List[str] ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) __UpperCAmelCase = '''A red cartoon frog, 4k''' __UpperCAmelCase = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowercase ) __UpperCAmelCase = 
KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase , __UpperCAmelCase = pipe_prior( _lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __UpperCAmelCase = pipeline( _lowercase , image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) __UpperCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase )
49
"""simple docstring""" def lowercase__ ( snake_case_ :float , snake_case_ :float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
49
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Optional[Any] = { 'configuration_upernet': ['UperNetConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'UperNetForSemanticSegmentation', 'UperNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys _lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
"""simple docstring""" def lowercase__ ( snake_case_ :dict ): __UpperCAmelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __UpperCAmelCase = set() return any( node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for node in graph ) def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ): visited.add(snake_case_ ) rec_stk.add(snake_case_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(snake_case_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
49
1
"""simple docstring""" import math def lowercase__ ( snake_case_ :int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__ ( snake_case_ :float = 0.1 ): __UpperCAmelCase = 3 __UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(snake_case_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
49
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ['PoolFormerFeatureExtractor'] _lowercase : Any = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
49
1
"""simple docstring""" _lowercase : int = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
49
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
49
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _UpperCAmelCase ( unittest.TestCase ): a__ : List[str] = MODEL_FOR_CAUSAL_LM_MAPPING a__ : Optional[int] = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def a ( self : Tuple ): __UpperCAmelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output __UpperCAmelCase = text_generator('''This is a test''' , do_sample=_lowercase ) self.assertEqual( _lowercase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) __UpperCAmelCase = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( _lowercase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) __UpperCAmelCase = text_generator('''This is a test''' , do_sample=_lowercase , num_return_sequences=2 , return_tensors=_lowercase ) self.assertEqual( _lowercase , [ {'''generated_token_ids''': ANY(_lowercase )}, {'''generated_token_ids''': ANY(_lowercase )}, ] , ) __UpperCAmelCase = text_generator.model.config.eos_token_id __UpperCAmelCase = '''<pad>''' __UpperCAmelCase = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=_lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowercase , ) self.assertEqual( _lowercase , [ [ {'''generated_token_ids''': ANY(_lowercase )}, {'''generated_token_ids''': ANY(_lowercase )}, ], [ {'''generated_token_ids''': ANY(_lowercase )}, {'''generated_token_ids''': ANY(_lowercase )}, ], ] , ) @require_tf def a ( self : Dict ): __UpperCAmelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output __UpperCAmelCase = text_generator('''This is a test''' , do_sample=_lowercase ) self.assertEqual( _lowercase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) __UpperCAmelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_lowercase ) self.assertEqual( _lowercase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def a ( self : Any , _lowercase : str , _lowercase : Optional[int] , _lowercase : str ): __UpperCAmelCase = TextGenerationPipeline(model=_lowercase , tokenizer=_lowercase ) return text_generator, ["This is a test", "Another test"] def a ( self : List[Any] ): __UpperCAmelCase = '''Hello I believe in''' __UpperCAmelCase = pipeline('''text-generation''' , 
model='''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = text_generator(_lowercase ) self.assertEqual( _lowercase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) __UpperCAmelCase = text_generator(_lowercase , stop_sequence=''' fe''' ) self.assertEqual(_lowercase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def a ( self : Optional[int] , _lowercase : List[Any] , _lowercase : str ): __UpperCAmelCase = text_generator.model __UpperCAmelCase = text_generator.tokenizer __UpperCAmelCase = text_generator('''This is a test''' ) self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) __UpperCAmelCase = text_generator('''This is a test''' , return_full_text=_lowercase ) self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) __UpperCAmelCase = pipeline(task='''text-generation''' , model=_lowercase , tokenizer=_lowercase , return_full_text=_lowercase ) __UpperCAmelCase = text_generator('''This is a test''' ) self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) __UpperCAmelCase = text_generator('''This is a test''' , return_full_text=_lowercase ) self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) __UpperCAmelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_lowercase ) self.assertEqual( _lowercase , [ [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], ] , ) if text_generator.tokenizer.pad_token is not None: __UpperCAmelCase = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_lowercase ) self.assertEqual( _lowercase , [ [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], [{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}], ] , ) with self.assertRaises(_lowercase ): __UpperCAmelCase = text_generator('''test''' , return_full_text=_lowercase , return_text=_lowercase ) with self.assertRaises(_lowercase ): __UpperCAmelCase = text_generator('''test''' , return_full_text=_lowercase , return_tensors=_lowercase ) with self.assertRaises(_lowercase ): __UpperCAmelCase = text_generator('''test''' , return_text=_lowercase , return_tensors=_lowercase ) # Empty prompt is slightly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): __UpperCAmelCase = text_generator('''''' ) self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): __UpperCAmelCase = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. 
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. __UpperCAmelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_00 , max_new_tokens=20 ) __UpperCAmelCase = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(_lowercase ): text_generator( '''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def a ( self : List[Any] ): import torch # Classic `model_kwargs` __UpperCAmelCase = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) __UpperCAmelCase = pipe('''This is a test''' ) self.assertEqual( _lowercase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) __UpperCAmelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) __UpperCAmelCase = pipe('''This is a test''' ) self.assertEqual( _lowercase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 __UpperCAmelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) __UpperCAmelCase = pipe('''This is a test''' ) self.assertEqual( _lowercase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def a ( self : Union[str, Any] ): import torch __UpperCAmelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def a ( self : int ): import torch __UpperCAmelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=_lowercase , top_p=0.5 ) def a ( self : int ): __UpperCAmelCase = '''Hello world''' __UpperCAmelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": __UpperCAmelCase = logging.get_logger('''transformers.generation.tf_utils''' ) else: __UpperCAmelCase = logging.get_logger('''transformers.generation.utils''' ) __UpperCAmelCase = '''Both 
`max_new_tokens`''' # The beginning of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(_lowercase ) as cl: __UpperCAmelCase = text_generator(_lowercase , max_length=10 , max_new_tokens=1 ) self.assertIn(_lowercase , cl.out ) # The user only sets one -> no warning with CaptureLogger(_lowercase ) as cl: __UpperCAmelCase = text_generator(_lowercase , max_new_tokens=1 ) self.assertNotIn(_lowercase , cl.out ) with CaptureLogger(_lowercase ) as cl: __UpperCAmelCase = text_generator(_lowercase , max_length=10 ) self.assertNotIn(_lowercase , cl.out )
49
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "EncodecFeatureExtractor" a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ): super().__init__(_lowercase , _lowercase ) __UpperCAmelCase = self.feature_extractor __UpperCAmelCase = False def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase ) def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''text''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase ) if audio is not None: __UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: __UpperCAmelCase = audio_inputs['''padding_mask'''] return inputs def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(_lowercase , padding_mask=_lowercase ) else: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ): return self.tokenizer.decode(*_lowercase , **_lowercase ) def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ): __UpperCAmelCase = to_numpy(_lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape if padding_mask is None: return list(_lowercase ) __UpperCAmelCase = to_numpy(_lowercase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase = seq_len - padding_mask.shape[-1] __UpperCAmelCase = 1 - self.feature_extractor.padding_value __UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase ) __UpperCAmelCase = audio_values.tolist() for i in range(_lowercase ): __UpperCAmelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 ) return audio_values
49
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Tuple = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
"""simple docstring""" def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __UpperCAmelCase = True for i in range(snake_case_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __UpperCAmelCase = True if a[i].islower(): __UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
49
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def a ( self : Any ): __UpperCAmelCase = 1 __UpperCAmelCase = 3 __UpperCAmelCase = (32, 32) __UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase ) return image @property def a ( self : int ): torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def a ( self : Optional[int] ): torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def a ( self : List[str] ): torch.manual_seed(0 ) __UpperCAmelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(_lowercase ) @property def a ( self : str ): def extract(*_lowercase : Optional[Any] , **_lowercase : Optional[Any] ): class _UpperCAmelCase : def __init__( self : Dict ): __UpperCAmelCase = torch.ones([0] ) def a ( self : Union[str, Any] , _lowercase : Tuple ): self.pixel_values.to(_lowercase ) return self return Out() return extract def a ( self : List[str] ): __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.dummy_cond_unet __UpperCAmelCase = PNDMScheduler(skip_prk_steps=_lowercase ) __UpperCAmelCase = self.dummy_vae __UpperCAmelCase = self.dummy_text_encoder __UpperCAmelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) __UpperCAmelCase = 77 __UpperCAmelCase = self.dummy_image.to(_lowercase ) __UpperCAmelCase = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk __UpperCAmelCase = AltDiffusionImgaImgPipeline( unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , ) __UpperCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase ) __UpperCAmelCase = alt_pipe.to(_lowercase ) alt_pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A painting of a squirrel eating a burger''' __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCAmelCase = alt_pipe( [prompt] , 
generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_lowercase , ) __UpperCAmelCase = output.images __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(0 ) __UpperCAmelCase = alt_pipe( [prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_lowercase , return_dict=_lowercase , )[0] __UpperCAmelCase = image[0, -3:, -3:, -1] __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a ( self : int ): __UpperCAmelCase = self.dummy_cond_unet __UpperCAmelCase = PNDMScheduler(skip_prk_steps=_lowercase ) __UpperCAmelCase = self.dummy_vae __UpperCAmelCase = self.dummy_text_encoder __UpperCAmelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) __UpperCAmelCase = 77 __UpperCAmelCase = self.dummy_image.to(_lowercase ) # put models in fp16 __UpperCAmelCase = unet.half() __UpperCAmelCase = vae.half() __UpperCAmelCase = bert.half() # make sure here that pndm scheduler skips prk __UpperCAmelCase = AltDiffusionImgaImgPipeline( unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , ) __UpperCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase ) __UpperCAmelCase = alt_pipe.to(_lowercase ) alt_pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A painting of a squirrel eating a burger''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = alt_pipe( [prompt] , generator=_lowercase , num_inference_steps=2 , output_type='''np''' , image=_lowercase , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) # resize to resolution that is divisible by 8 but not 16 or 32 __UpperCAmelCase = init_image.resize((7_60, 5_04) ) __UpperCAmelCase = '''BAAI/AltDiffusion''' __UpperCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained( _lowercase , safety_checker=_lowercase , ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A fantasy landscape, trending on artstation''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images[0] __UpperCAmelCase = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) __UpperCAmelCase = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Tuple ): __UpperCAmelCase = 
load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __UpperCAmelCase = init_image.resize((7_68, 5_12) ) __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' ) __UpperCAmelCase = '''BAAI/AltDiffusion''' __UpperCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained( _lowercase , safety_checker=_lowercase , ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A fantasy landscape, trending on artstation''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
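Outside the test harness, the same pipeline is driven as in the sketch below, which uses only the checkpoint, image URL, and parameters referenced in the tests above. Note that the class is spelled `AltDiffusionImg2ImgPipeline` in diffusers itself, and a CUDA device plus network access are assumed:

import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

generator = torch.manual_seed(0)
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("fantasy_landscape.png")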
49
"""simple docstring""" from collections import deque class _UpperCAmelCase : def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ): __UpperCAmelCase = process_name # process name __UpperCAmelCase = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __UpperCAmelCase = arrival_time __UpperCAmelCase = burst_time # remaining burst time __UpperCAmelCase = 0 # total time of the process wait in ready queue __UpperCAmelCase = 0 # time from arrival time to completion time class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ): # total number of mlfq's queues __UpperCAmelCase = number_of_queues # time slice of queues that round robin algorithm applied __UpperCAmelCase = time_slices # unfinished process is in this ready_queue __UpperCAmelCase = queue # current time __UpperCAmelCase = current_time # finished process is in this sequence queue __UpperCAmelCase = deque() def a ( self : Dict ): __UpperCAmelCase = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a ( self : str , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a ( self : Any , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a ( self : Tuple , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a ( self : Optional[int] , _lowercase : deque[Process] ): return [q.burst_time for q in queue] def a ( self : str , _lowercase : Process ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a ( self : Union[str, Any] , _lowercase : deque[Process] ): __UpperCAmelCase = deque() # sequence deque of finished process while len(_lowercase ) != 0: __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_lowercase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __UpperCAmelCase = 0 # set the process's turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # set the completion time __UpperCAmelCase = self.current_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ): __UpperCAmelCase = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_lowercase ) ): __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_lowercase ) # if the burst time of process is bigger than 
time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __UpperCAmelCase = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_lowercase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __UpperCAmelCase = 0 # set the finish time __UpperCAmelCase = self.current_time # update the process' turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a ( self : Union[str, Any] ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __UpperCAmelCase , __UpperCAmelCase = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _lowercase : List[str] = Process('P1', 0, 53) _lowercase : str = Process('P2', 0, 17) _lowercase : Union[str, Any] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : Any = 3 _lowercase : Union[str, Any] = [17, 25] _lowercase : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) _lowercase : Optional[Any] = Process('P1', 0, 53) _lowercase : Tuple = Process('P2', 0, 17) _lowercase : Optional[int] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : int = 3 _lowercase : int = [17, 25] _lowercase : List[str] = deque([Pa, Pa, Pa, Pa]) _lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0) _lowercase : str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( f"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( f"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
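To see the bookkeeping on something hand-checkable, here is a smaller run than the demo above (the process values are chosen for this sketch, not taken from the original doctests): two processes arriving at t=0, one round-robin queue with a 2-unit slice, then FCFS. A runs 2 units (t=2), B runs 2 (t=4), then FCFS finishes A (t=5) and B (t=8).

from collections import deque

pa = Process("A", 0, 3)
pb = Process("B", 0, 5)
scheduler = MLFQ(number_of_queues=2, time_slices=[2], queue=deque([pa, pb]), current_time=0)
scheduler.multi_level_feedback_queue()

print(scheduler.calculate_sequence_of_finish_queue())  # ['A', 'B']
print(scheduler.calculate_waiting_time([pa, pb]))      # [2, 3]
print(scheduler.calculate_completion_time([pa, pb]))   # [5, 8]
print(scheduler.calculate_turnaround_time([pa, pb]))   # [5, 8]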
49
1
"""simple docstring""" import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _UpperCAmelCase : def __init__( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[str]=13 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : List[Any]=True , _lowercase : Tuple=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[Any]=99 , _lowercase : Union[str, Any]=64 , _lowercase : int=32 , _lowercase : Optional[Any]=5 , _lowercase : List[str]=4 , _lowercase : Optional[Any]=37 , _lowercase : List[str]="gelu" , _lowercase : Any=0.1 , _lowercase : List[Any]=0.1 , _lowercase : Any=5_12 , _lowercase : Tuple=16 , _lowercase : List[str]=2 , _lowercase : Union[str, Any]=0.02 , _lowercase : Tuple=3 , _lowercase : Any=4 , _lowercase : List[str]=None , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = seq_length __UpperCAmelCase = is_training __UpperCAmelCase = use_input_mask __UpperCAmelCase = use_token_type_ids __UpperCAmelCase = use_labels __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = embedding_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = type_sequence_label_size __UpperCAmelCase = initializer_range __UpperCAmelCase = num_labels __UpperCAmelCase = num_choices __UpperCAmelCase = scope def a ( self : List[str] ): __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = None if self.use_input_mask: __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase = None if self.use_token_type_ids: __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self : Tuple ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , ) def a ( self : List[Any] , _lowercase : List[str] , _lowercase : int , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Dict , _lowercase : List[str] , _lowercase : List[Any] ): __UpperCAmelCase = MegatronBertModel(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) __UpperCAmelCase = model(_lowercase , token_type_ids=_lowercase ) __UpperCAmelCase = model(_lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : List[str] , _lowercase : Dict ): __UpperCAmelCase = MegatronBertForMaskedLM(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self : str , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : str , _lowercase : List[Any] , _lowercase : str , _lowercase : str , _lowercase : Union[str, Any] ): __UpperCAmelCase = MegatronBertForCausalLM(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self : Optional[Any] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : str , _lowercase : Dict , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Any ): __UpperCAmelCase = MegatronBertForNextSentencePrediction(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def a ( self : List[Any] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : str , _lowercase : List[Any] , _lowercase : int , _lowercase : List[Any] , _lowercase : Dict ): __UpperCAmelCase = MegatronBertForPreTraining(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def a ( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Any , _lowercase : List[str] , _lowercase : List[str] , _lowercase : List[Any] ): __UpperCAmelCase = MegatronBertForQuestionAnswering(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a ( self : int , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[Any] ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = MegatronBertForSequenceClassification(_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self : Optional[Any] , _lowercase : str , _lowercase : int , _lowercase : List[str] , _lowercase : Dict , _lowercase : List[str] , _lowercase : int , _lowercase : Dict ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = MegatronBertForTokenClassification(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a ( self : Dict , _lowercase : Tuple , _lowercase : Tuple , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : Optional[Any] ): __UpperCAmelCase = self.num_choices __UpperCAmelCase = MegatronBertForMultipleChoice(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase = model( _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a ( self : str ): __UpperCAmelCase = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) = config_and_inputs __UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[str] = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) a__ : Union[str, Any] = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) a__ : Optional[Any] = True # test_resize_embeddings = False a__ : Dict = False def a ( self : Union[str, Any] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : List[str]=False ): __UpperCAmelCase = 
super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) if return_labels: if model_class in get_values(_lowercase ): __UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase ) __UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowercase ) return inputs_dict def a ( self : List[Any] ): __UpperCAmelCase = MegatronBertModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=_lowercase , hidden_size=37 ) def a ( self : str ): self.config_tester.run_common_tests() def a ( self : Union[str, Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*_lowercase ) def a ( self : str ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase ) def a ( self : List[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase ) def a ( self : Tuple ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase ) def a ( self : Dict ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase ) def a ( self : Dict ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase ) def a ( self : str ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowercase ) def lowercase__ ( snake_case_ :int ): return torch.tensor( snake_case_ , dtype=torch.long , device=snake_case_ , ) _lowercase : List[str] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: __UpperCAmelCase = os.path.join(os.environ['''MYDIR'''] , _lowercase ) __UpperCAmelCase = MegatronBertModel.from_pretrained(_lowercase ) model.to(_lowercase ) model.half() __UpperCAmelCase = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __UpperCAmelCase = model(_lowercase )[0] __UpperCAmelCase = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , _lowercase ) __UpperCAmelCase = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3 ): for jj in range(3 ): __UpperCAmelCase = output[0, ii, jj] __UpperCAmelCase = expected[3 * ii + jj] __UpperCAmelCase = '''ii={} jj={} a={} b={}'''.format(_lowercase , _lowercase , _lowercase , _lowercase ) self.assertTrue(math.isclose(_lowercase , _lowercase , rel_tol=_lowercase , abs_tol=_lowercase ) , msg=_lowercase )
49
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : List[Any] = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "camembert" def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = use_cache __UpperCAmelCase = classifier_dropout class _UpperCAmelCase ( _lowerCAmelCase ): @property def a ( self : Tuple ): if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
49
1
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowercase__ ( snake_case_ :Dict=32 , snake_case_ :Dict=10 , snake_case_ :Union[str, Any]=100 , snake_case_ :Union[str, Any]=1_026 , snake_case_ :str=True , snake_case_ :List[str]="data/tokenized_stories_train_wikitext103.jbl" , snake_case_ :List[str]="igf_context_pairs.jbl" , ): set_seed(3 ) # generate train_data and objective_set __UpperCAmelCase , __UpperCAmelCase = generate_datasets( snake_case_ , snake_case_ , number=snake_case_ , min_len=1_026 , trim=snake_case_ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __UpperCAmelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model __UpperCAmelCase = load_gpta('''gpt2''' ).to(snake_case_ ) print('''computing perplexity on objective set''' ) __UpperCAmelCase = compute_perplexity(snake_case_ , snake_case_ , snake_case_ ).item() print('''perplexity on objective set:''' , snake_case_ ) # collect igf pairs and save to file demo.jbl collect_objective_set(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowercase__ ( snake_case_ :Dict , snake_case_ :List[Any]=15 , snake_case_ :Any=128 , snake_case_ :int=100 , snake_case_ :List[str]="igf_model.pt" , ): set_seed(42 ) # Load pre-trained model __UpperCAmelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model __UpperCAmelCase = SecondaryLearner(snake_case_ ) # Train secondary learner __UpperCAmelCase = train_secondary_learner( snake_case_ , snake_case_ , max_epochs=snake_case_ , batch_size=snake_case_ , eval_freq=100 , igf_model_path=snake_case_ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowercase__ ( snake_case_ :str , snake_case_ :int , snake_case_ :str , snake_case_ :List[str]=32 , snake_case_ :Optional[Any]=1_000 , snake_case_ :Optional[Any]=16 , snake_case_ :List[str]=1.0 , snake_case_ :List[Any]=recopy_gpta , snake_case_ :Optional[int]=None , snake_case_ :Optional[int]=10 , snake_case_ :Tuple="gpt2_finetuned.pt" , ): __UpperCAmelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) __UpperCAmelCase = RandomSampler(snake_case_ ) __UpperCAmelCase = DataLoader(snake_case_ , sampler=snake_case_ ) __UpperCAmelCase = max_steps // (len(snake_case_ )) + 1 __UpperCAmelCase = 0 __UpperCAmelCase = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case_ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = recopy_model(snake_case_ , snake_case_ , snake_case_ ) model.train() if secondary_learner is not None: secondary_learner.to(snake_case_ ) secondary_learner.eval() __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = [] __UpperCAmelCase = [] # Compute the performance of the transformer model at the beginning __UpperCAmelCase = compute_perplexity(snake_case_ , snake_case_ , snake_case_ ) test_perps.append(snake_case_ ) print('''Test 
perplexity, step''' , snake_case_ , ''':''' , snake_case_ ) for epoch in range(int(snake_case_ ) ): for step, example in enumerate(snake_case_ ): torch.cuda.empty_cache() __UpperCAmelCase = random.randint(0 , example.size(2 ) - context_len - 1 ) __UpperCAmelCase = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __UpperCAmelCase = model(snake_case_ , labels=snake_case_ ) __UpperCAmelCase = True if secondary_learner is not None: __UpperCAmelCase = secondary_learner.forward( torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(snake_case_ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: __UpperCAmelCase = -1 if predicted_q < threshold: __UpperCAmelCase = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __UpperCAmelCase = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __UpperCAmelCase = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __UpperCAmelCase = compute_perplexity(snake_case_ , snake_case_ , snake_case_ ) test_perps.append(snake_case_ ) print('''Test perplexity, step''' , snake_case_ , ''':''' , snake_case_ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , snake_case_ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The input data dir. 
Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=snake_case_ , default=snake_case_ , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=snake_case_ , default=snake_case_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=snake_case_ , type=snake_case_ , required=snake_case_ , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=snake_case_ , type=snake_case_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=snake_case_ , default=snake_case_ , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=snake_case_ , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=snake_case_ , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=snake_case_ , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=snake_case_ , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=snake_case_ , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=snake_case_ , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=snake_case_ , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=snake_case_ , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=snake_case_ , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=snake_case_ , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=snake_case_ , type=snake_case_ , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=snake_case_ , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=snake_case_ , help='''finetuned_model_name''' ) parser.add_argument( '''--recopy_model''' , default=snake_case_ , type=snake_case_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training 
the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=snake_case_ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner __UpperCAmelCase = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner __UpperCAmelCase = training_secondary_learner( snake_case_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model __UpperCAmelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __UpperCAmelCase , __UpperCAmelCase = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=snake_case_ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( snake_case_ , snake_case_ , snake_case_ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=snake_case_ , secondary_learner=snake_case_ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
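For reference, the pipeline that `main()` wires together can also be driven step by step. A sketch that simply mirrors the hard-coded calls above; the `.jbl` data files are assumed to already exist, and `trim=True` stands in for the obfuscated default:

import joblib

# 1) collect (context, IG(X)) pairs for the secondary learner
generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1_026, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
)

# 2) train the secondary learner on those pairs
secondary_learner = training_secondary_learner(
    joblib.load("data/IGF_values.jbl"),
    secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
    eval_freq=100, igf_model_path="igf_model.pt",
)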
49
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks if the entire collection has been sorted if len(snake_case_ ) <= 1 or n <= 1: return insert_next(snake_case_ , n - 1 ) rec_insertion_sort(snake_case_ , n - 1 ) def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks order between adjacent elements if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase = ( collection[index], collection[index - 1], ) insert_next(snake_case_ , index + 1 ) if __name__ == "__main__": _lowercase : Any = input('Enter integers separated by spaces: ') _lowercase : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
49
1
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=13 , _lowercase : Union[str, Any]=[30, 30] , _lowercase : int=2 , _lowercase : Dict=3 , _lowercase : Optional[int]=True , _lowercase : Dict=True , _lowercase : Union[str, Any]=32 , _lowercase : Tuple=5 , _lowercase : Optional[Any]=4 , _lowercase : Any=37 , _lowercase : Optional[int]="gelu" , _lowercase : Dict=0.1 , _lowercase : List[Any]=0.1 , _lowercase : Dict=10 , _lowercase : Tuple=0.02 , _lowercase : Optional[int]=3 , _lowercase : Union[str, Any]=None , _lowercase : Optional[int]=8 , _lowercase : Tuple=10 , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = type_sequence_label_size __UpperCAmelCase = initializer_range __UpperCAmelCase = num_labels __UpperCAmelCase = scope __UpperCAmelCase = n_targets __UpperCAmelCase = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __UpperCAmelCase = (image_size[1] // patch_size) * (image_size[0] // patch_size) __UpperCAmelCase = num_patches + 1 + self.num_detection_tokens def a ( self : str ): __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) __UpperCAmelCase = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __UpperCAmelCase = [] for i in range(self.batch_size ): __UpperCAmelCase = {} __UpperCAmelCase = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_lowercase ) __UpperCAmelCase = torch.rand(self.n_targets , 4 , device=_lowercase ) labels.append(_lowercase ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def a ( self : Any ): return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , 
num_labels=self.num_labels , ) def a ( self : Dict , _lowercase : Dict , _lowercase : List[Any] , _lowercase : List[Any] ): __UpperCAmelCase = YolosModel(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def a ( self : List[Any] , _lowercase : Dict , _lowercase : Optional[int] , _lowercase : Dict ): __UpperCAmelCase = YolosForObjectDetection(_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(pixel_values=_lowercase ) __UpperCAmelCase = model(_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) __UpperCAmelCase = model(pixel_values=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def a ( self : Tuple ): __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : str = (YolosModel, YolosForObjectDetection) if is_torch_available() else () a__ : Optional[Any] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) a__ : List[Any] = False a__ : Any = False a__ : int = False a__ : List[Any] = False def a ( self : List[str] , _lowercase : str , _lowercase : Optional[int] , _lowercase : str=False ): __UpperCAmelCase = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __UpperCAmelCase = [] for i in range(self.model_tester.batch_size ): __UpperCAmelCase = {} __UpperCAmelCase = torch.ones( size=(self.model_tester.n_targets,) , device=_lowercase , dtype=torch.long ) __UpperCAmelCase = torch.ones( self.model_tester.n_targets , 4 , device=_lowercase , dtype=torch.float ) labels.append(_lowercase ) __UpperCAmelCase = labels return inputs_dict def a ( self : Dict ): __UpperCAmelCase = YolosModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 ) def a ( self : List[Any] ): self.config_tester.run_common_tests() def a ( self : Optional[int] ): # YOLOS does not use inputs_embeds pass def a ( self : Any ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) ) def a ( self : Any ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = 
[*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowercase ) def a ( self : str ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def a ( self : Any ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = True # in YOLOS, the seq_len is different __UpperCAmelCase = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = outputs.attentions self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase = True __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = outputs.attentions self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __UpperCAmelCase = len(_lowercase ) # Check attention is always last and order is fine __UpperCAmelCase = True __UpperCAmelCase = True __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = 1 self.assertEqual(out_len + added_hidden_states , len(_lowercase ) ) __UpperCAmelCase = outputs.attentions self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def a ( self : str ): def check_hidden_states_output(_lowercase : List[Any] , _lowercase : Any , _lowercase : Optional[Any] ): __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = outputs.hidden_states __UpperCAmelCase = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_lowercase ) , _lowercase ) # YOLOS has a different seq_length __UpperCAmelCase = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = True check_hidden_states_output(_lowercase , _lowercase , _lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True check_hidden_states_output(_lowercase , _lowercase , _lowercase ) def a ( self : Dict ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_lowercase ) @slow def a ( self : Optional[Any] ): for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = YolosModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) def 
lowercase__ ( ): __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def a ( self : str ): return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def a ( self : List[str] ): __UpperCAmelCase = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(_lowercase ) __UpperCAmelCase = self.default_image_processor __UpperCAmelCase = prepare_img() __UpperCAmelCase = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase ) # forward pass with torch.no_grad(): __UpperCAmelCase = model(inputs.pixel_values ) # verify outputs __UpperCAmelCase = torch.Size((1, 1_00, 92) ) self.assertEqual(outputs.logits.shape , _lowercase ) __UpperCAmelCase = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=_lowercase , ) __UpperCAmelCase = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=_lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowercase , atol=1E-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowercase , atol=1E-4 ) ) # verify postprocessing __UpperCAmelCase = image_processor.post_process_object_detection( _lowercase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] __UpperCAmelCase = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(_lowercase ) __UpperCAmelCase = [75, 75, 17, 63, 17] __UpperCAmelCase = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(_lowercase ) self.assertEqual(len(results['''scores'''] ) , 5 ) self.assertTrue(torch.allclose(results['''scores'''] , _lowercase , atol=1E-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() , _lowercase ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , _lowercase ) )
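The integration test above boils down to the following inference recipe; this sketch uses only the checkpoint, fixture image, and post-processing helper referenced in the test:

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# keep detections above a 0.3 score, rescaled to the original image size
results = image_processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label in zip(results["scores"], results["labels"]):
    print(f"{model.config.id2label[label.item()]}: {score:.3f}")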
49
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : Any = StableUnCLIPPipeline a__ : Dict = TEXT_TO_IMAGE_PARAMS a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ : Optional[int] = False def a ( self : List[str] ): __UpperCAmelCase = 32 __UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase ) __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL() __UpperCAmelCase = { # prior 
components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ): if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def a ( self : Any ): __UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=_lowercase ) def a ( self : int ): __UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowercase ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Any ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' ) __UpperCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase ) def a ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase : Any = { 'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'], 'tokenization_lxmert': ['LxmertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = ['LxmertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'LxmertEncoder', 'LxmertForPreTraining', 'LxmertForQuestionAnswering', 'LxmertModel', 'LxmertPreTrainedModel', 'LxmertVisualFeatureEncoder', 'LxmertXLayer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLxmertForPreTraining', 'TFLxmertMainLayer', 'TFLxmertModel', 'TFLxmertPreTrainedModel', 'TFLxmertVisualFeatureEncoder', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys _lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import Any def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ): _validation( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Creates data structures and fill initial step __UpperCAmelCase = {} __UpperCAmelCase = {} for state in states_space: __UpperCAmelCase = observations_space[0] __UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case_ ) ): __UpperCAmelCase = observations_space[o] __UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state # Update probabilities and pointers dicts __UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __UpperCAmelCase = arg_max # The final observation __UpperCAmelCase = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state __UpperCAmelCase = arg_max # Process pointers backwards __UpperCAmelCase = last_state __UpperCAmelCase = [] for o in range(len(snake_case_ ) - 1 , -1 , -1 ): result.append(snake_case_ ) __UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_not_empty( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) _validate_lists(snake_case_ , snake_case_ ) _validate_dicts( snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any ): _validate_list(snake_case_ , '''observations_space''' ) _validate_list(snake_case_ , '''states_space''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list''' raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list of strings''' raise ValueError(snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ ) _validate_nested_dict(snake_case_ , '''transition_probabilities''' ) _validate_nested_dict(snake_case_ , '''emission_probabilities''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): _validate_dict(_object , snake_case_ , snake_case_ ) for x in 
_object.values(): _validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a dict''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ): __UpperCAmelCase = F'''{var_name} all keys must be strings''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ): __UpperCAmelCase = '''nested dictionary ''' if nested else '''''' __UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
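# --- usage sketch (illustration, not part of the module above) ---
# The classic healthy/fever HMM, shown only to make the expected input shapes concrete;
# the probabilities are the textbook example values:
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   initial = {"Healthy": 0.6, "Fever": 0.4}
#   transition = {
#       "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#       "Fever": {"Healthy": 0.4, "Fever": 0.6},
#   }
#   emission = {
#       "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#       "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#   }
#   viterbi(observations, states, initial, transition, emission)
#   # -> ['Healthy', 'Healthy', 'Fever']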
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def lowercase__ ( snake_case_ :Optional[Any] ): __UpperCAmelCase = 384 if "tiny" in model_name: __UpperCAmelCase = [3, 3, 9, 3] __UpperCAmelCase = [96, 192, 384, 768] if "small" in model_name: __UpperCAmelCase = [3, 3, 27, 3] __UpperCAmelCase = [96, 192, 384, 768] if "base" in model_name: __UpperCAmelCase = [3, 3, 27, 3] __UpperCAmelCase = [128, 256, 512, 1_024] __UpperCAmelCase = 512 if "large" in model_name: __UpperCAmelCase = [3, 3, 27, 3] __UpperCAmelCase = [192, 384, 768, 1_536] __UpperCAmelCase = 768 if "xlarge" in model_name: __UpperCAmelCase = [3, 3, 27, 3] __UpperCAmelCase = [256, 512, 1_024, 2_048] __UpperCAmelCase = 1_024 # set label information __UpperCAmelCase = 150 __UpperCAmelCase = '''huggingface/label-files''' __UpperCAmelCase = '''ade20k-id2label.json''' __UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) ) __UpperCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()} __UpperCAmelCase = {v: k for k, v in idalabel.items()} __UpperCAmelCase = ConvNextConfig( depths=snake_case_ , hidden_sizes=snake_case_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) __UpperCAmelCase = UperNetConfig( backbone_config=snake_case_ , auxiliary_in_channels=snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowercase__ ( snake_case_ :Dict ): __UpperCAmelCase = [] # fmt: off # stem rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') ) rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', 
F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def lowercase__ ( snake_case_ :Tuple , snake_case_ :Optional[int] , snake_case_ :Any ): __UpperCAmelCase = dct.pop(snake_case_ ) __UpperCAmelCase = val def lowercase__ ( snake_case_ :Dict , snake_case_ :List[str] , snake_case_ :List[Any] ): __UpperCAmelCase = { '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''', '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''', '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''', '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''', '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''', } __UpperCAmelCase = model_name_to_url[model_name] __UpperCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' )['''state_dict'''] __UpperCAmelCase = get_upernet_config(snake_case_ ) __UpperCAmelCase = UperNetForSemanticSegmentation(snake_case_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __UpperCAmelCase = state_dict.pop(snake_case_ ) if "bn" in key: __UpperCAmelCase = key.replace('''bn''' , '''batch_norm''' ) __UpperCAmelCase = val # rename keys __UpperCAmelCase = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) model.load_state_dict(snake_case_ ) # verify on image __UpperCAmelCase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' __UpperCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert('''RGB''' ) __UpperCAmelCase = SegformerImageProcessor() __UpperCAmelCase = processor(snake_case_ , return_tensors='''pt''' ).pixel_values with torch.no_grad(): __UpperCAmelCase = model(snake_case_ ) if model_name 
== "upernet-convnext-tiny": __UpperCAmelCase = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __UpperCAmelCase = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __UpperCAmelCase = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __UpperCAmelCase = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __UpperCAmelCase = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(F'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(F'''openmmlab/{model_name}''' ) processor.push_to_hub(F'''openmmlab/{model_name}''' ) if __name__ == "__main__": _lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowercase : Dict = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowercase : int = logging.get_logger(__name__) _lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _lowercase : str = { 'vocab_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json' ), }, } _lowercase : int = { 'yjernite/retribert-base-uncased': 5_12, } _lowercase : Any = { 'yjernite/retribert-base-uncased': {'do_lower_case': True}, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = PRETRAINED_INIT_CONFIGURATION a__ : Optional[Any] = RetriBertTokenizer a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ): super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars ): __UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = strip_accents __UpperCAmelCase = tokenize_chinese_chars __UpperCAmelCase = normalizer_class(**_lowercase ) __UpperCAmelCase = do_lower_case def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ): __UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase )
"""simple docstring""" def lowercase__ ( snake_case_ :List[str] ): __UpperCAmelCase = len(snake_case_ ) for i in range(length - 1 ): __UpperCAmelCase = i for k in range(i + 1 , snake_case_ ): if collection[k] < collection[least]: __UpperCAmelCase = k if least != i: __UpperCAmelCase , __UpperCAmelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": _lowercase : Dict = input('Enter numbers separated by a comma:\n').strip() _lowercase : Optional[Any] = [int(item) for item in user_input.split(',')] print(selection_sort(unsorted))
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _lowercase : Dict = 'bart' _lowercase : Dict = True @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __UpperCAmelCase = qar_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = (None, None) if MODEL_TYPE == "bart": __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __UpperCAmelCase = sas_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = faiss.StandardGpuResources() __UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __UpperCAmelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) __UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ ) wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU else: __UpperCAmelCase , __UpperCAmelCase = (None, None) __UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): __UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __UpperCAmelCase = elia['''train_eli5'''] __UpperCAmelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(snake_case_ ) return (elia_train, eli5_train_q_index) _lowercase ,_lowercase ,_lowercase : Dict = load_indexes() _lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models() _lowercase ,_lowercase : Tuple = load_train_data() def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ): __UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ ) __UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]] return nn_examples def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ): if source == "none": __UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index( snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ ) else: __UpperCAmelCase , __UpperCAmelCase = query_es_index( snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , ) __UpperCAmelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda snake_case_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None), } ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ): with torch.no_grad(): __UpperCAmelCase = qa_sas_generate( snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar _lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' _lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) _lowercase : str = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] _lowercase : Optional[int] = st.sidebar.checkbox('Demo options') if demo_options: _lowercase : Tuple = st.sidebar.selectbox( '', action_list, index=3, ) _lowercase : List[str] = action_list.index(action_st) _lowercase : str = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) _lowercase : int = show_type == 'Show full text of passages' else: _lowercase : str = 3 _lowercase : List[Any] = True _lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options') if retrieval_options: _lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) _lowercase : 
Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: _lowercase : List[str] = 'wiki40b' _lowercase : Optional[int] = 'dense' _lowercase : List[Any] = 'beam' _lowercase : str = 2 _lowercase : Optional[int] = 64 _lowercase : Union[str, Any] = 2_56 _lowercase : List[str] = None _lowercase : Optional[int] = None _lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options') if generate_options: _lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) _lowercase : Optional[int] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) _lowercase : Optional[Any] = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": _lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _lowercase : List[Any] = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) _lowercase : Dict = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) _lowercase : Union[str, Any] = None # start main text _lowercase : Optional[int] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] _lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": _lowercase : Optional[Any] = st.text_input('Enter your question here:', '') else: _lowercase : int = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": _lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10) _lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) _lowercase : Dict = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _lowercase : Any = support_list[:10] _lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: _lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: _lowercase ,_lowercase : Union[str, Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): _lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) _lowercase : Any = res[1].strip() if sec_titles == "": _lowercase : Dict = '[{}]({})'.format(res[0], wiki_url) else: _lowercase : List[Any] = sec_titles.split(' & ') _lowercase : int = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: _lowercase : List[Any] = find_nearest_training(question) _lowercase : Tuple = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) _lowercase : int = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) _lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
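# --- usage sketch (illustration, not part of the app above) ---
# Streamlit apps are launched through the CLI, not with `python`; assuming the file is
# saved as eli5_app.py and the memmap/index files referenced above exist locally:
#
#   streamlit run eli5_app.py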
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase = TextIteratorStreamer(_lowercase ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowercase , _lowercase ) def a ( self : str ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Tuple ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase ) model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase = cs.out[:-1] # Remove the final "\n" __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : Tuple ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowercase ): __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text
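# --- usage sketch (illustration, not part of the tests above) ---
# The pattern the iterator-streamer tests exercise is the one users wire into apps:
# run generate() in a background thread and consume decoded text chunks as they arrive.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tok = AutoTokenizer.from_pretrained("distilgpt2")
#   model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#   inputs = tok(["A short story:"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok, skip_prompt=True)
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)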
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[str] = CycleDiffusionPipeline a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def a ( self : Optional[int] ): torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __UpperCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __UpperCAmelCase = CLIPTextModel(_lowercase ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ): __UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image / 2 + 0.5 if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def a ( self : Optional[int] ): __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = 
CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_dummy_components() for name, module in components.items(): if hasattr(_lowercase , '''half''' ): __UpperCAmelCase = module.half() __UpperCAmelCase = CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def a ( self : Tuple ): return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def a ( self : List[str] ): return super().test_inference_batch_single_identical() @skip_mps def a ( self : int ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def a ( self : str ): return super().test_save_load_optional_components() @skip_mps def a ( self : int ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : int ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained( _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def a ( self : Optional[Any] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images assert np.abs(image - expected_image ).max() < 2E-2
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : str = KandinskyInpaintPipeline a__ : Tuple = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] a__ : List[str] = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", "mask_image", ] a__ : Optional[Any] = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] a__ : List[Any] = False @property def a ( self : Optional[int] ): return 32 @property def a ( self : List[str] ): return 32 @property def a ( self : Union[str, Any] ): return self.time_input_dim @property def a ( self : Union[str, Any] ): return self.time_input_dim * 4 @property def a ( self : Union[str, Any] ): return 1_00 @property def a ( self : Dict ): __UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def a ( self : List[str] ): torch.manual_seed(0 ) __UpperCAmelCase = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) __UpperCAmelCase = MultilingualCLIP(_lowercase ) __UpperCAmelCase = text_encoder.eval() return text_encoder @property def a ( self : Optional[int] ): torch.manual_seed(0 ) __UpperCAmelCase = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __UpperCAmelCase = UNetaDConditionModel(**_lowercase ) return model @property def a ( self : List[str] ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self : int ): torch.manual_seed(0 ) __UpperCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def a ( self : List[Any] ): 
__UpperCAmelCase = self.dummy_text_encoder __UpperCAmelCase = self.dummy_tokenizer __UpperCAmelCase = self.dummy_unet __UpperCAmelCase = self.dummy_movq __UpperCAmelCase = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_lowercase , ) __UpperCAmelCase = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def a ( self : Union[str, Any] , _lowercase : List[Any] , _lowercase : Tuple=0 ): __UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase ) # create init_image __UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) # create mask __UpperCAmelCase = np.ones((64, 64) , dtype=np.floataa ) __UpperCAmelCase = 0 if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''horse''', '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def a ( self : Union[str, Any] ): __UpperCAmelCase = '''cpu''' __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = self.pipeline_class(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) ) __UpperCAmelCase = output.images __UpperCAmelCase = pipe( **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0] __UpperCAmelCase = image[0, -3:, -3:, -1] __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __UpperCAmelCase = np.array( [0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def a ( self : Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Tuple ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) __UpperCAmelCase = np.ones((7_68, 7_68) , 
dtype=np.floataa ) __UpperCAmelCase = 0 __UpperCAmelCase = '''a hat''' __UpperCAmelCase = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowercase ) __UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase , __UpperCAmelCase = pipe_prior( _lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __UpperCAmelCase = pipeline( _lowercase , image=_lowercase , mask_image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , ) __UpperCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase )
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'} _lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _lowercase : List[str] = { 'google/rembert': 2_56, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Union[str, Any] = VOCAB_FILES_NAMES a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ): super().__init__( do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = remove_space __UpperCAmelCase = keep_accents __UpperCAmelCase = vocab_file __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(_lowercase ) @property def a ( self : int ): return len(self.sp_model ) def a ( self : Tuple ): __UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): __UpperCAmelCase = self.__dict__.copy() __UpperCAmelCase = None return state def __setstate__( self : Tuple , _lowercase : str ): __UpperCAmelCase = d __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ): __UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase ) return pieces def a ( self : int , _lowercase : List[str] ): return self.sp_model.PieceToId(_lowercase ) def a ( self : List[str] , _lowercase : str ): return self.sp_model.IdToPiece(_lowercase ) def a ( self : Any , _lowercase : Dict ): __UpperCAmelCase = self.sp_model.decode_pieces(_lowercase ) return out_string def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1] def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] 
__UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not os.path.isdir(_lowercase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) ) return __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ): copyfile(self.vocab_file , _lowercase ) return (out_vocab_file,)
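# A minimal usage sketch for the SentencePiece tokenizer above, assuming it is
# exposed as transformers' RemBertTokenizer and that the "google/rembert"
# checkpoint (with its sentencepiece.model) is reachable. Single sequences are
# wrapped as [CLS] ... [SEP]; pairs get token_type_ids of 0 for the first
# segment and 1 for the second, matching the last token-type method above.
from transformers import RemBertTokenizer

tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
single = tokenizer("Hello world")
print(tokenizer.convert_ids_to_tokens(single["input_ids"]))  # ['[CLS]', ..., '[SEP]']
pair = tokenizer("first segment", "second segment")
print(pair["token_type_ids"])  # 0s for segment one, 1s for segment two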
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } _lowercase : List[Any] = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Tuple , snake_case_ :Optional[int] , snake_case_ :List[str] ): for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __UpperCAmelCase = '''lm_head''' __UpperCAmelCase = getattr(snake_case_ , snake_case_ ) if weight_type is not None: __UpperCAmelCase = getattr(snake_case_ , snake_case_ ).shape else: __UpperCAmelCase = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __UpperCAmelCase = value elif weight_type == "weight_g": __UpperCAmelCase = value elif weight_type == "weight_v": __UpperCAmelCase = value elif weight_type == "bias": __UpperCAmelCase = value else: __UpperCAmelCase = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase__ ( snake_case_ :str , snake_case_ :Tuple , snake_case_ :Optional[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = fairseq_model.state_dict() __UpperCAmelCase = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == '''group''' , ) __UpperCAmelCase = True else: for key, mapped_key in MAPPING.items(): __UpperCAmelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __UpperCAmelCase = True if "*" in mapped_key: __UpperCAmelCase = name.split(snake_case_ )[0].split('''.''' )[-2] __UpperCAmelCase = mapped_key.replace('''*''' , snake_case_ ) if "weight_g" in name: __UpperCAmelCase = '''weight_g''' elif "weight_v" in name: __UpperCAmelCase = '''weight_v''' elif "bias" in name: __UpperCAmelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCAmelCase = '''weight''' else: __UpperCAmelCase = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :int , snake_case_ :List[Any] ): __UpperCAmelCase = full_name.split('''conv_layers.''' )[-1] __UpperCAmelCase = name.split('''.''' ) __UpperCAmelCase = int(items[0] ) __UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __UpperCAmelCase = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __UpperCAmelCase = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __UpperCAmelCase = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __UpperCAmelCase = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[int] , snake_case_ :List[str]=None , snake_case_ :Optional[Any]=None , snake_case_ :Optional[int]=True ): if config_path is not None: __UpperCAmelCase = UniSpeechConfig.from_pretrained(snake_case_ ) else: __UpperCAmelCase = UniSpeechConfig() if is_finetuned: if dict_path: __UpperCAmelCase = Dictionary.load_from_json(snake_case_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCAmelCase = target_dict.pad_index __UpperCAmelCase = target_dict.bos_index __UpperCAmelCase = target_dict.eos_index __UpperCAmelCase = len(target_dict.symbols ) __UpperCAmelCase = os.path.join(snake_case_ , '''vocab.json''' ) if not os.path.isdir(snake_case_ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case_ ) ) return os.makedirs(snake_case_ , exist_ok=snake_case_ ) __UpperCAmelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCAmelCase = 42 __UpperCAmelCase = 43 with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(snake_case_ , snake_case_ ) __UpperCAmelCase = WavaVecaPhonemeCTCTokenizer( snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case_ , ) __UpperCAmelCase = True if config.feat_extract_norm == '''layer''' else False __UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , ) __UpperCAmelCase = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ ) processor.save_pretrained(snake_case_ ) __UpperCAmelCase = UniSpeechForCTC(snake_case_ ) else: __UpperCAmelCase = UniSpeechForPreTraining(snake_case_ ) if is_finetuned: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __UpperCAmelCase = model[0].eval() recursively_load_weights(snake_case_ , snake_case_ , snake_case_ ) hf_unispeech.save_pretrained(snake_case_ ) if __name__ == "__main__": _lowercase : Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', 
action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) _lowercase : List[str] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
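# Once the conversion script above has written a dump folder, the checkpoint
# loads through the regular transformers API. A sketch, assuming a fine-tuned
# CTC conversion was saved to the hypothetical path "./unispeech-converted".
import torch
from transformers import UniSpeechForCTC, Wav2Vec2FeatureExtractor

model = UniSpeechForCTC.from_pretrained("./unispeech-converted")
extractor = Wav2Vec2FeatureExtractor.from_pretrained("./unispeech-converted")

# one second of silence at the 16 kHz rate the feature extractor is built for
inputs = extractor(torch.zeros(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, frames, vocab) CTC logits
print(logits.shape)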
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : List[Any] = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : int = LEDTokenizer a__ : Optional[Any] = LEDTokenizerFast a__ : List[Any] = True def a ( self : int ): super().setUp() __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __UpperCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) __UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __UpperCAmelCase = {'''unk_token''': '''<unk>'''} __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowercase ) ) def a ( self : str , **_lowercase : Any ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Optional[int] , **_lowercase : int ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase ) def a ( self : Union[str, Any] , _lowercase : List[Any] ): return "lower newer", "lower newer" @cached_property def a ( self : int ): return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def a ( self : Union[str, Any] ): return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def a ( self : Tuple ): __UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __UpperCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors='''pt''' ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(_lowercase , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase = tokenizer(_lowercase , padding=_lowercase , return_tensors='''pt''' ) self.assertIn('''input_ids''' , _lowercase ) self.assertIn('''attention_mask''' , _lowercase ) self.assertNotIn('''labels''' , _lowercase ) self.assertNotIn('''decoder_attention_mask''' , _lowercase ) @require_torch def a ( self : Union[str, Any] ): __UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase = 
tokenizer(text_target=_lowercase , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def a ( self : Any ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=_lowercase , truncation=_lowercase , return_tensors='''pt''' ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def a ( self : str ): __UpperCAmelCase = ['''A long paragraph for summarization.'''] __UpperCAmelCase = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = tokenizer(text_target=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = inputs['''input_ids'''] __UpperCAmelCase = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def a ( self : List[str] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase = ['''Summary of the text.''', '''Another summary.'''] __UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase = tokenizer(_lowercase , padding=_lowercase ) __UpperCAmelCase = [[0] * len(_lowercase ) for x in encoded_output['''input_ids''']] __UpperCAmelCase = tokenizer.pad(_lowercase ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , _lowercase ) def a ( self : Tuple ): pass def a ( self : Dict ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = '''A, <mask> AllenNLP sentence.''' __UpperCAmelCase = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase ) __UpperCAmelCase = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( _lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( _lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
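# The global-attention test above in isolation: LED's tokenizer.pad carries a
# caller-supplied "global_attention_mask" through padding, extending the short
# rows with -1. A sketch, assuming the allenai/led-base-16384 checkpoint is
# reachable.
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok(["Summary of the text.", "Another summary."])  # no padding yet
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tok.pad(enc, padding=True)
print(padded["global_attention_mask"])  # the shorter row is extended with -1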
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _lowercase : List[Any] = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase__ ( snake_case_ :Union[str, Any] ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase__ ( snake_case_ :int , snake_case_ :Dict ): if args.student_type == "roberta": __UpperCAmelCase = False elif args.student_type == "gpt2": __UpperCAmelCase = False def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ): if args.student_type == "roberta": __UpperCAmelCase = False def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax 
temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' ) __UpperCAmelCase = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite''' ''' it. Use `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __UpperCAmelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ ) __UpperCAmelCase = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) __UpperCAmelCase = special_tok_ids __UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with
open(args.token_counts , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) __UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __UpperCAmelCase = 0.0 # do not predict special tokens __UpperCAmelCase = torch.from_numpy(snake_case_ ) else: __UpperCAmelCase = None __UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) __UpperCAmelCase = student_config_class.from_pretrained(args.student_config ) __UpperCAmelCase = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: __UpperCAmelCase = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # __UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __UpperCAmelCase = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
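# The loss that --temperature and --alpha_ce ultimately feed (it lives in
# distiller.py, not in this launcher): DistilBERT-style soft-target
# distillation, i.e. KL divergence between temperature-softened distributions,
# scaled by T**2 so gradient magnitudes stay comparable across temperatures.
# A sketch on fake logits.
import torch
import torch.nn.functional as F

def soft_target_loss(student_logits, teacher_logits, temperature=2.0):
    t = temperature
    return F.kl_div(
        F.log_softmax(student_logits / t, dim=-1),
        F.softmax(teacher_logits / t, dim=-1),
        reduction="batchmean",
    ) * (t**2)

student = torch.randn(4, 30_522)  # BERT-sized vocabulary
teacher = torch.randn(4, 30_522)
print(soft_target_loss(student, teacher).item())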
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : int = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : Tuple = 25_00_04 _lowercase : Optional[Any] = 25_00_20 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : List[str] = MBartaaTokenizer a__ : List[str] = MBartaaTokenizerFast a__ : Union[str, Any] = True a__ : Optional[Any] = True def a ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : Any ): __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def a ( self : List[str] ): __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowercase ) , 10_54 ) def a ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def a ( self : List[Any] ): __UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def a ( self : List[str] ): # fmt: off __UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 
12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def a ( self : Any ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __UpperCAmelCase 
= tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=True __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it save with the same files self.assertSequenceEqual(_lowercase , _lowercase ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) # Save tokenizer rust, legacy_format=False __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase ) __UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase ) __UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowercase , _lowercase ) ) shutil.rmtree(_lowercase ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): a__ : Tuple = "facebook/mbart-large-50-one-to-many-mmt" a__ : Optional[Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] a__ : List[str] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] a__ : Dict = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def a ( cls : Tuple ): __UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) __UpperCAmelCase = 1 return cls def a ( self : Tuple ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def a ( 
self : int ): __UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) def a ( self : Union[str, Any] ): self.assertIn(_lowercase , self.tokenizer.all_special_ids ) __UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) __UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def a ( self : Any ): __UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _lowercase ) __UpperCAmelCase = 10 __UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0] self.assertEqual(ids[0] , _lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_lowercase ) , _lowercase ) def a ( self : Dict ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def a ( self : Any ): __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowercase ) __UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase ) @require_torch def a ( self : Dict ): __UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def a ( self : Optional[int] ): __UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) __UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' ) __UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' ) __UpperCAmelCase = targets['''input_ids'''] __UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self : List[str] ): __UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , 
tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
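# The translation convention the integration tests above exercise, end to end:
# MBart-50 prefixes inputs with the source language code and forces the target
# language code as the first generated token. A sketch, assuming network access
# to the checkpoint.
from transformers import MBart50Tokenizer, MBartForConditionalGeneration

name = "facebook/mbart-large-50-one-to-many-mmt"
tok = MBart50Tokenizer.from_pretrained(name, src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained(name)

batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
out = model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id["ro_RO"])
print(tok.batch_decode(out, skip_special_tokens=True))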
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowercase__ ( snake_case_ :int = 4_000_000 ): __UpperCAmelCase = [0, 1] __UpperCAmelCase = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 __UpperCAmelCase = 0 for j in range(len(snake_case_ ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _lowercase : Union[str, Any] = logging.getLogger(__name__) _lowercase : Optional[Any] = 'Hello world! cécé herlolip' _lowercase : str = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def lowercase__ ( snake_case_ :Any , snake_case_ :int ): __UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) __UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage ) __UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ ) original.eval() __UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) __UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs __UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) __UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __UpperCAmelCase = encoder_input_ids __UpperCAmelCase = decoder_input_ids __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical __UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = original.generator(snake_case_ ) __UpperCAmelCase = new_model( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = new_model.generator(snake_case_ ) __UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) _lowercase : List[str] = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
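# The parity check above, reduced to a reusable helper: compare two forward
# passes by maximum absolute difference before trusting a converted checkpoint.
# A sketch on synthetic tensors.
import torch

def outputs_match(a: torch.Tensor, b: torch.Tensor, atol: float = 1e-3) -> bool:
    diff = torch.max(torch.abs(a - b)).item()
    print(f"Maximum absolute difference between outputs: {diff:.6f}")
    return torch.allclose(a, b, atol=atol)

x = torch.randn(1, 512, 768)
assert outputs_match(x, x + 1e-5)      # numerical noise passes
assert not outputs_match(x, x + 1.0)   # a real divergence fails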
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=_lowerCAmelCase ): a__ : Tuple = ["torch", "transformers", "onnx"] def __init__( self : Optional[int] , *_lowercase : int , **_lowercase : Optional[int] ): requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Tuple , *_lowercase : List[Any] , **_lowercase : Tuple ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : List[str] , *_lowercase : Optional[int] , **_lowercase : List[Any] ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class _UpperCAmelCase ( metaclass=_lowerCAmelCase ): a__ : Optional[int] = ["torch", "transformers", "onnx"] def __init__( self : Tuple , *_lowercase : Optional[Any] , **_lowercase : List[Any] ): requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Optional[Any] , *_lowercase : Tuple , **_lowercase : Tuple ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Dict , *_lowercase : List[Any] , **_lowercase : Union[str, Any] ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class _UpperCAmelCase ( metaclass=_lowerCAmelCase ): a__ : Optional[Any] = ["torch", "transformers", "onnx"] def __init__( self : List[str] , *_lowercase : int , **_lowercase : List[Any] ): requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Any , *_lowercase : str , **_lowercase : Any ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : int , *_lowercase : Dict , **_lowercase : List[Any] ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class _UpperCAmelCase ( metaclass=_lowerCAmelCase ): a__ : str = ["torch", "transformers", "onnx"] def __init__( self : str , *_lowercase : str , **_lowercase : Optional[int] ): requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Union[str, Any] , *_lowercase : Any , **_lowercase : Tuple ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Tuple , *_lowercase : Tuple , **_lowercase : str ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class _UpperCAmelCase ( metaclass=_lowerCAmelCase ): a__ : int = ["torch", "transformers", "onnx"] def __init__( self : List[str] , *_lowercase : List[str] , **_lowercase : Any ): requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : List[str] , *_lowercase : List[str] , **_lowercase : Union[str, Any] ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : int , *_lowercase : Any , **_lowercase : Optional[int] ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class _UpperCAmelCase ( metaclass=_lowerCAmelCase ): a__ : Any = ["torch", "transformers", "onnx"] def __init__( self : int , *_lowercase : Any , **_lowercase : Optional[int] ): requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Tuple , *_lowercase : Any , **_lowercase : Optional[int] ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def a ( cls : Optional[Any] , *_lowercase : Optional[int] , **_lowercase : int ): requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def a ( self : List[str] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self : Dict ): __UpperCAmelCase = ort.SessionOptions() __UpperCAmelCase = False return options def a ( self : Any ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a ( self : Optional[int] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-3
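
# Editor's sketch (not part of the dataset row) of the pipeline the nightly test
# above exercises. The model id, image URLs and call arguments are taken from the
# test itself; everything else is illustrative.
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None
)
output = pipe(
    prompt="A red cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
)
print(output.images.shape)  # (1, 512, 512, 3), as asserted above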
style_context_codestyle: 49
label: 1
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _lowercase : str = 'platform' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :Tuple=None , snake_case_ :Optional[Any]=None , snake_case_ :Union[str, Any]=None , snake_case_ :Optional[int]=None , snake_case_ :Union[str, Any]=None , snake_case_ :Dict=None , ): if attention_mask is None: __UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : str=13 , _lowercase : Dict=7 , _lowercase : Union[str, Any]=True , _lowercase : Optional[int]=False , _lowercase : str=99 , _lowercase : List[str]=16 , _lowercase : List[str]=2 , _lowercase : str=4 , _lowercase : Optional[Any]=4 , _lowercase : List[Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : List[Any]=0.1 , _lowercase : List[Any]=32 , _lowercase : List[str]=2 , _lowercase : Optional[Any]=1 , _lowercase : Any=0 , _lowercase : Tuple=0.02 , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = seq_length __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = eos_token_id __UpperCAmelCase = pad_token_id __UpperCAmelCase = bos_token_id __UpperCAmelCase = initializer_range def a ( self : List[str] ): __UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __UpperCAmelCase = shift_tokens_right(_lowercase , 1 , 2 ) __UpperCAmelCase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads 
, decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowercase , ) __UpperCAmelCase = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase ) return config, inputs_dict def a ( self : Tuple ): __UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : List[str] ): __UpperCAmelCase = 20 __UpperCAmelCase = model_class_name(_lowercase ) __UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] ) __UpperCAmelCase , __UpperCAmelCase = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase ) __UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) __UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , ) __UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , ) __UpperCAmelCase = model.decode(_lowercase , _lowercase ) __UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def a ( self : str , _lowercase : Any , _lowercase : List[str] , _lowercase : List[Any] ): __UpperCAmelCase = 20 __UpperCAmelCase = model_class_name(_lowercase ) __UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] ) __UpperCAmelCase , __UpperCAmelCase = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __UpperCAmelCase = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase ) __UpperCAmelCase = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase = model.decode( decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , ) __UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __UpperCAmelCase = model.decode( decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , ) __UpperCAmelCase = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase ) __UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) 
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class _UpperCAmelCase ( unittest.TestCase ): a__ : int = 99 def a ( self : Optional[Any] ): __UpperCAmelCase = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __UpperCAmelCase = input_ids.shape[0] __UpperCAmelCase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def a ( self : str ): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_config_and_data() __UpperCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(_lowercase ) __UpperCAmelCase = lm_model(input_ids=_lowercase ) __UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _lowercase ) def a ( self : Tuple ): __UpperCAmelCase = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __UpperCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(_lowercase ) __UpperCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __UpperCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __UpperCAmelCase = lm_model(input_ids=_lowercase , decoder_input_ids=_lowercase ) __UpperCAmelCase = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _lowercase ) def a ( self : str ): __UpperCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __UpperCAmelCase = shift_tokens_right(_lowercase , 1 , 2 ) __UpperCAmelCase = np.equal(_lowercase , 1 ).astype(np.floataa ).sum() __UpperCAmelCase = np.equal(_lowercase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_lowercase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase , _lowerCAmelCase ): a__ : List[Any] = True a__ : Union[str, Any] = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) a__ : Dict = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def a ( self : Optional[int] ): __UpperCAmelCase = FlaxBlenderbotSmallModelTester(self ) def a ( self : Optional[Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase ) def a ( self : int ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_lowercase , _lowercase , _lowercase ) def a ( self : List[Any] ): __UpperCAmelCase , __UpperCAmelCase 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) __UpperCAmelCase = model_class(_lowercase ) @jax.jit def encode_jitted(_lowercase : int , _lowercase : str=None , **_lowercase : List[Any] ): return model.encode(input_ids=_lowercase , attention_mask=_lowercase ) with self.subTest('''JIT Enabled''' ): __UpperCAmelCase = encode_jitted(**_lowercase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __UpperCAmelCase = encode_jitted(**_lowercase ).to_tuple() self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for jitted_output, output in zip(_lowercase , _lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def a ( self : Optional[int] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase = model_class(_lowercase ) __UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) __UpperCAmelCase = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_lowercase : Optional[int] , _lowercase : Tuple , _lowercase : List[str] ): return model.decode( decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , ) with self.subTest('''JIT Enabled''' ): __UpperCAmelCase = decode_jitted(**_lowercase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __UpperCAmelCase = decode_jitted(**_lowercase ).to_tuple() self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for jitted_output, output in zip(_lowercase , _lowercase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def a ( self : Dict ): for model_class_name in self.all_model_classes: __UpperCAmelCase = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id __UpperCAmelCase = model(_lowercase ) self.assertIsNotNone(_lowercase )
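
# Editor's sketch of what shift_tokens_right (exercised in the tests above) is
# expected to do. This standalone numpy reimplementation is illustrative, not the
# library code.
import numpy as np


def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]        # shift everything one slot right
    shifted[:, 0] = decoder_start_token_id    # decoder start token in front
    return np.where(shifted == -100, pad_token_id, shifted)  # mask label padding


ids = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
print(shift_tokens_right_sketch(ids, pad_token_id=1, decoder_start_token_id=2))
# [[ 2 71 82 18 33  2  1]] -> one pad token fewer and the start token first,
# matching the assertions in the shift test above.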
code_codestyle: 49
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__ ( snake_case_ :Dict , snake_case_ :int ): assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} __UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} __UpperCAmelCase = features.copy() __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} 
) if features is not None else None ) __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ): if issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = jsonl_path elif issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = [jsonl_path] __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ): assert isinstance(snake_case_ , snake_case_ ) for split in splits: __UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read() 
_check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ): if split: __UpperCAmelCase = {split: jsonl_path} else: __UpperCAmelCase = '''train''' __UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path} __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__ ( snake_case_ :Optional[int] ): return json.load(snake_case_ ) def lowercase__ ( snake_case_ :Any ): return [json.loads(snake_case_ ) for line in buffer] class _UpperCAmelCase : @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, 
'''data'''), ] , ) def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 def a ( self : int , _lowercase : Any ): with pytest.raises(_lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ): __UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' __UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() assert exported_content == original_content
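
# Editor's sketch of the round trip these tests cover, using the public entry
# points rather than the internal JsonDatasetReader/JsonDatasetWriter classes.
# Dataset.to_json writes JSON Lines by default; load_dataset("json") reads it back.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
ds.to_json("roundtrip.jsonl")
reloaded = load_dataset("json", data_files="roundtrip.jsonl", split="train")
print(reloaded.column_names)  # ['col_1', 'col_2', 'col_3']
print(reloaded.features)      # col_2 inferred as int64, col_3 as float64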
style_context_codestyle: 49
label: 1
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "char" a__ : List[Any] = "bpe" a__ : str = "wp" _lowercase : int = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Optional[Any] = ["image_processor", "char_tokenizer"] a__ : Dict = "ViTImageProcessor" a__ : Dict = "MgpstrTokenizer" def __init__( self : Union[str, Any] , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None , **_lowercase : List[str] ): __UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _lowercase , ) __UpperCAmelCase = kwargs.pop('''feature_extractor''' ) __UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) __UpperCAmelCase = tokenizer __UpperCAmelCase = AutoTokenizer.from_pretrained('''gpt2''' ) __UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(_lowercase , _lowercase ) def __call__( self : Union[str, Any] , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None , _lowercase : Tuple=None , **_lowercase : Tuple ): if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __UpperCAmelCase = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase ) if text is not None: __UpperCAmelCase = self.char_tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) if text is None: return inputs elif images is None: return encodings else: __UpperCAmelCase = encodings['''input_ids'''] return inputs def a ( self : Tuple , _lowercase : Tuple ): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = sequences __UpperCAmelCase = char_preds.size(0 ) __UpperCAmelCase , __UpperCAmelCase = self._decode_helper(_lowercase , '''char''' ) __UpperCAmelCase , __UpperCAmelCase = self._decode_helper(_lowercase , '''bpe''' ) __UpperCAmelCase , __UpperCAmelCase = self._decode_helper(_lowercase , '''wp''' ) __UpperCAmelCase = [] __UpperCAmelCase = [] for i in range(_lowercase ): __UpperCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]] __UpperCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]] __UpperCAmelCase = scores.index(max(_lowercase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __UpperCAmelCase = {} __UpperCAmelCase = final_strs __UpperCAmelCase = final_scores __UpperCAmelCase = char_strs __UpperCAmelCase = bpe_strs __UpperCAmelCase = wp_strs return out def a ( self : Union[str, Any] , _lowercase : Any , _lowercase : str ): if format == DecodeType.CHARACTER: __UpperCAmelCase = self.char_decode __UpperCAmelCase = 1 __UpperCAmelCase = '''[s]''' elif format == DecodeType.BPE: __UpperCAmelCase = self.bpe_decode __UpperCAmelCase = 2 __UpperCAmelCase = '''#''' elif format == DecodeType.WORDPIECE: __UpperCAmelCase = self.wp_decode __UpperCAmelCase = 1_02 __UpperCAmelCase = '''[SEP]''' else: raise ValueError(F'''Format {format} is not 
supported.''' ) __UpperCAmelCase , __UpperCAmelCase = [], [] __UpperCAmelCase = pred_logits.size(0 ) __UpperCAmelCase = pred_logits.size(1 ) __UpperCAmelCase , __UpperCAmelCase = pred_logits.topk(1 , dim=-1 , largest=_lowercase , sorted=_lowercase ) __UpperCAmelCase = preds_index.view(-1 , _lowercase )[:, 1:] __UpperCAmelCase = decoder(_lowercase ) __UpperCAmelCase , __UpperCAmelCase = torch.nn.functional.softmax(_lowercase , dim=2 ).max(dim=2 ) __UpperCAmelCase = preds_max_prob[:, 1:] for index in range(_lowercase ): __UpperCAmelCase = preds_str[index].find(_lowercase ) __UpperCAmelCase = preds_str[index][:pred_eos] __UpperCAmelCase = preds_index[index].cpu().tolist() __UpperCAmelCase = pred_index.index(_lowercase ) if eos_token in pred_index else -1 __UpperCAmelCase = preds_max_prob[index][: pred_eos_index + 1] __UpperCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_lowercase ) conf_scores.append(_lowercase ) return dec_strs, conf_scores def a ( self : List[Any] , _lowercase : int ): __UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_lowercase )] return decode_strs def a ( self : Optional[Any] , _lowercase : str ): return self.bpe_tokenizer.batch_decode(_lowercase ) def a ( self : int , _lowercase : str ): __UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_lowercase )] return decode_strs
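
# Editor's sketch of the per-sequence confidence score computed in _decode_helper
# above: the max softmax probability at each step (skipping the first GO/BOS slot)
# is cumulatively multiplied; in the real helper the product stops at the
# end-of-sequence position. Shapes here are illustrative.
import torch

logits = torch.randn(2, 6, 38)                    # (batch, seq_len, vocab)
probs, preds = torch.nn.functional.softmax(logits, dim=2).max(dim=2)
per_step = probs[:, 1:]                           # drop the first decoding slot
confidence = per_step[0].cumprod(dim=0)[-1]       # product over the kept steps
print(float(confidence))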
code_codestyle: 49
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase = TextIteratorStreamer(_lowercase ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowercase , _lowercase ) def a ( self : str ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Tuple ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase ) model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase = cs.out[:-1] # Remove the final "\n" __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : Tuple ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowercase ): __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text
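
# Editor's sketch of the consumer-side pattern the iterator tests above rely on:
# generation runs in a background thread while the main thread drains the streamer.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
text = "".join(chunk for chunk in streamer)  # blocks until generation finishes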
style_context_codestyle: 49
label: 1
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Tuple , _lowercase : str , _lowercase : Union[str, Any]=None , _lowercase : Tuple=True , _lowercase : Tuple=None , **_lowercase : Optional[Any] ): __UpperCAmelCase = parent __UpperCAmelCase = config_class __UpperCAmelCase = has_text_modality __UpperCAmelCase = kwargs __UpperCAmelCase = common_properties def a ( self : Tuple ): __UpperCAmelCase = self.config_class(**self.inputs_dict ) __UpperCAmelCase = ( ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers'''] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['''vocab_size'''] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowercase , _lowercase ) , msg=F'''`{prop}` does not exist''' ) # Test that config has the common properties as setter for idx, name in enumerate(_lowercase ): try: setattr(_lowercase , _lowercase , _lowercase ) self.parent.assertEqual( getattr(_lowercase , _lowercase ) , _lowercase , msg=F'''`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowercase ): try: __UpperCAmelCase = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowercase , _lowercase ) , _lowercase , msg=F'''`{name} value {idx} expected, but was {getattr(_lowercase , _lowercase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def a ( self : List[Any] ): __UpperCAmelCase = self.config_class(**self.inputs_dict ) __UpperCAmelCase = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowercase ) def a ( self : Tuple ): __UpperCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase = os.path.join(_lowercase , '''config.json''' ) config_first.to_json_file(_lowercase ) __UpperCAmelCase = self.config_class.from_json_file(_lowercase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def a ( self : Dict ): __UpperCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowercase ) __UpperCAmelCase = self.config_class.from_pretrained(_lowercase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def a ( self : Dict ): __UpperCAmelCase = self.config_class(**self.inputs_dict ) __UpperCAmelCase = '''test''' with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase = os.path.join(_lowercase , _lowercase ) config_first.save_pretrained(_lowercase ) __UpperCAmelCase = self.config_class.from_pretrained(_lowercase , subfolder=_lowercase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) 
self.parent.assertEqual(len(config.labelaid ) , 5 ) __UpperCAmelCase = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def a ( self : Any ): if self.config_class.is_composition: return __UpperCAmelCase = self.config_class() self.parent.assertIsNotNone(_lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = copy.deepcopy(_lowercase ) __UpperCAmelCase = self.config_class(**_lowercase ) __UpperCAmelCase = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) ) elif getattr(_lowercase , _lowercase ) != value: wrong_values.append((key, getattr(_lowercase , _lowercase ), value) ) if len(_lowercase ) > 0: __UpperCAmelCase = '''\n'''.join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] ) raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' ) def a ( self : Tuple ): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
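
# Editor's sketch of the save/reload invariant the config tester above checks for
# every config class, shown here with BertConfig as a stand-in.
import tempfile

from transformers import BertConfig

cfg = BertConfig(hidden_size=64, num_labels=5)
assert len(cfg.id2label) == 5                 # mirrors the num_labels check above
with tempfile.TemporaryDirectory() as tmp:
    cfg.save_pretrained(tmp)
    reloaded = BertConfig.from_pretrained(tmp)
assert reloaded.to_dict() == cfg.to_dict()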
code_codestyle: 49
"""simple docstring""" def lowercase__ ( snake_case_ :float , snake_case_ :float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 49
label: 1
"""simple docstring""" from __future__ import annotations _lowercase : int = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } class _UpperCAmelCase : def __init__( self : Dict , _lowercase : dict[str, list[str]] , _lowercase : str ): __UpperCAmelCase = graph # mapping node to its parent in resulting breadth first tree __UpperCAmelCase = {} __UpperCAmelCase = source_vertex def a ( self : List[Any] ): __UpperCAmelCase = {self.source_vertex} __UpperCAmelCase = None __UpperCAmelCase = [self.source_vertex] # first in first out queue while queue: __UpperCAmelCase = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(_lowercase ) __UpperCAmelCase = vertex queue.append(_lowercase ) def a ( self : Optional[Any] , _lowercase : str ): if target_vertex == self.source_vertex: return self.source_vertex __UpperCAmelCase = self.parent.get(_lowercase ) if target_vertex_parent is None: __UpperCAmelCase = ( F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(_lowercase ) return self.shortest_path(_lowercase ) + F'''->{target_vertex}''' if __name__ == "__main__": _lowercase : Tuple = Graph(graph, 'G') g.breath_first_search() print(g.shortest_path('D')) print(g.shortest_path('G')) print(g.shortest_path('Foo'))
code_codestyle: 49
"""simple docstring""" def lowercase__ ( snake_case_ :dict ): __UpperCAmelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __UpperCAmelCase = set() return any( node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for node in graph ) def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ): visited.add(snake_case_ ) rec_stk.add(snake_case_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(snake_case_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 49
label: 1
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :int , snake_case_ :Union[str, Any] ): if isinstance(snake_case_ , torch.Tensor ): return image elif isinstance(snake_case_ , PIL.Image.Image ): __UpperCAmelCase = [image] if isinstance(image[0] , PIL.Image.Image ): __UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] __UpperCAmelCase = np.concatenate(snake_case_ , axis=0 ) __UpperCAmelCase = np.array(snake_case_ ).astype(np.floataa ) / 255.0 __UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 ) __UpperCAmelCase = 2.0 * image - 1.0 __UpperCAmelCase = torch.from_numpy(snake_case_ ) elif isinstance(image[0] , torch.Tensor ): __UpperCAmelCase = torch.cat(snake_case_ , dim=0 ) return image def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[int] , snake_case_ :Optional[Any] , snake_case_ :Union[str, Any]=0.9995 ): if not isinstance(snake_case_ , np.ndarray ): __UpperCAmelCase = True __UpperCAmelCase = va.device __UpperCAmelCase = va.cpu().numpy() __UpperCAmelCase = va.cpu().numpy() __UpperCAmelCase = np.sum(va * va / (np.linalg.norm(snake_case_ ) * np.linalg.norm(snake_case_ )) ) if np.abs(snake_case_ ) > DOT_THRESHOLD: __UpperCAmelCase = (1 - t) * va + t * va else: __UpperCAmelCase = np.arccos(snake_case_ ) __UpperCAmelCase = np.sin(snake_case_ ) __UpperCAmelCase = theta_a * t __UpperCAmelCase = np.sin(snake_case_ ) __UpperCAmelCase = np.sin(theta_a - theta_t ) / sin_theta_a __UpperCAmelCase = sin_theta_t / sin_theta_a __UpperCAmelCase = sa * va + sa * va if inputs_are_torch: __UpperCAmelCase = torch.from_numpy(snake_case_ ).to(snake_case_ ) return va def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :List[Any] ): __UpperCAmelCase = F.normalize(snake_case_ , dim=-1 ) __UpperCAmelCase = F.normalize(snake_case_ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Optional[int] ): for param in model.parameters(): __UpperCAmelCase = value class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : List[str] , _lowercase : AutoencoderKL , _lowercase : CLIPTextModel , _lowercase : CLIPModel , _lowercase : CLIPTokenizer , _lowercase : UNetaDConditionModel , _lowercase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _lowercase : CLIPFeatureExtractor , _lowercase : Optional[Any]=None , _lowercase : Any=None , _lowercase : Optional[Any]=None , ): super().__init__() self.register_modules( vae=_lowercase , text_encoder=_lowercase , clip_model=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , coca_model=_lowercase , coca_tokenizer=_lowercase , coca_transform=_lowercase , ) __UpperCAmelCase = ( feature_extractor.size if isinstance(feature_extractor.size , _lowercase ) else feature_extractor.size['''shortest_edge'''] ) __UpperCAmelCase = 
transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , _lowercase ) set_requires_grad(self.clip_model , _lowercase ) def a ( self : Optional[int] , _lowercase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_lowercase ) def a ( self : List[Any] ): self.enable_attention_slicing(_lowercase ) def a ( self : Tuple ): set_requires_grad(self.vae , _lowercase ) def a ( self : List[str] ): set_requires_grad(self.vae , _lowercase ) def a ( self : List[Any] ): set_requires_grad(self.unet , _lowercase ) def a ( self : List[str] ): set_requires_grad(self.unet , _lowercase ) def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[str] ): # get the original timestep using init_timestep __UpperCAmelCase = min(int(num_inference_steps * strength ) , _lowercase ) __UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) __UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a ( self : Optional[Any] , _lowercase : Any , _lowercase : List[str] , _lowercase : Any , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Any=None ): if not isinstance(_lowercase , torch.Tensor ): raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(_lowercase )}''' ) __UpperCAmelCase = image.to(device=_lowercase , dtype=_lowercase ) if isinstance(_lowercase , _lowercase ): __UpperCAmelCase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase ) ] __UpperCAmelCase = torch.cat(_lowercase , dim=0 ) else: __UpperCAmelCase = self.vae.encode(_lowercase ).latent_dist.sample(_lowercase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __UpperCAmelCase = 0.18_215 * init_latents __UpperCAmelCase = init_latents.repeat_interleave(_lowercase , dim=0 ) __UpperCAmelCase = randn_tensor(init_latents.shape , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents __UpperCAmelCase = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = init_latents return latents def a ( self : List[str] , _lowercase : Any ): __UpperCAmelCase = self.coca_transform(_lowercase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __UpperCAmelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) __UpperCAmelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' ) def a ( self : int , _lowercase : List[str] , _lowercase : Optional[int] ): __UpperCAmelCase = self.feature_extractor.preprocess(_lowercase ) __UpperCAmelCase = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half() __UpperCAmelCase = self.clip_model.get_image_features(_lowercase ) __UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowercase ) __UpperCAmelCase = image_embeddings_clip.repeat_interleave(_lowercase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def a ( self : Tuple , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : 
str , _lowercase : List[str] , ): __UpperCAmelCase = latents.detach().requires_grad_() __UpperCAmelCase = self.scheduler.scale_model_input(_lowercase , _lowercase ) # predict the noise residual __UpperCAmelCase = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __UpperCAmelCase = self.scheduler.alphas_cumprod[timestep] __UpperCAmelCase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __UpperCAmelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __UpperCAmelCase = torch.sqrt(_lowercase ) __UpperCAmelCase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , _lowercase ): __UpperCAmelCase = self.scheduler.sigmas[index] __UpperCAmelCase = latents - sigma * noise_pred else: raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __UpperCAmelCase = 1 / 0.18_215 * sample __UpperCAmelCase = self.vae.decode(_lowercase ).sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase = transforms.Resize(self.feature_extractor_size )(_lowercase ) __UpperCAmelCase = self.normalize(_lowercase ).to(latents.dtype ) __UpperCAmelCase = self.clip_model.get_image_features(_lowercase ) __UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowercase ) __UpperCAmelCase = spherical_dist_loss(_lowercase , _lowercase ).mean() * clip_guidance_scale __UpperCAmelCase = -torch.autograd.grad(_lowercase , _lowercase )[0] if isinstance(self.scheduler , _lowercase ): __UpperCAmelCase = latents.detach() + grads * (sigma**2) __UpperCAmelCase = noise_pred_original else: __UpperCAmelCase = noise_pred_original - torch.sqrt(_lowercase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : List[Any] , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Optional[int] = 5_12 , _lowercase : Optional[int] = 5_12 , _lowercase : float = 0.6 , _lowercase : Optional[int] = 50 , _lowercase : Optional[float] = 7.5 , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[float] = 1_00 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : float = 0.8 , _lowercase : float = 0.1 , _lowercase : float = 0.1 , ): if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(_lowercase )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(_lowercase , torch.Generator ) and batch_size > 1: __UpperCAmelCase = [generator] + [None] * (batch_size - 1) __UpperCAmelCase = [ ('''model''', self.coca_model is None), ('''tokenizer''', self.coca_tokenizer is None), ('''transform''', self.coca_transform is None), ] __UpperCAmelCase = [x[0] for x in coca_is_none if x[1]] __UpperCAmelCase = ''', '''.join(_lowercase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(_lowercase ): raise ValueError( 
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) __UpperCAmelCase = self.get_image_description(_lowercase ) if style_prompt is None: if len(_lowercase ): raise ValueError( F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) __UpperCAmelCase = self.get_image_description(_lowercase ) # get prompt text embeddings for content and style __UpperCAmelCase = self.tokenizer( _lowercase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_lowercase , return_tensors='''pt''' , ) __UpperCAmelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __UpperCAmelCase = self.tokenizer( _lowercase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_lowercase , return_tensors='''pt''' , ) __UpperCAmelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __UpperCAmelCase = slerp(_lowercase , _lowercase , _lowercase ) # duplicate text embeddings for each generation per prompt __UpperCAmelCase = text_embeddings.repeat_interleave(_lowercase , dim=0 ) # set timesteps __UpperCAmelCase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __UpperCAmelCase = {} if accepts_offset: __UpperCAmelCase = 1 self.scheduler.set_timesteps(_lowercase , **_lowercase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __UpperCAmelCase , __UpperCAmelCase = self.get_timesteps(_lowercase , _lowercase , self.device ) __UpperCAmelCase = timesteps[:1].repeat(_lowercase ) # Preprocess image __UpperCAmelCase = preprocess(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = self.prepare_latents( _lowercase , _lowercase , _lowercase , text_embeddings.dtype , self.device , _lowercase ) __UpperCAmelCase = preprocess(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = self.prepare_latents( _lowercase , _lowercase , _lowercase , text_embeddings.dtype , self.device , _lowercase ) __UpperCAmelCase = slerp(_lowercase , _lowercase , _lowercase ) if clip_guidance_scale > 0: __UpperCAmelCase = self.get_clip_image_embeddings(_lowercase , _lowercase ) __UpperCAmelCase = self.get_clip_image_embeddings(_lowercase , _lowercase ) __UpperCAmelCase = slerp( _lowercase , _lowercase , _lowercase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __UpperCAmelCase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __UpperCAmelCase = content_text_input.input_ids.shape[-1] __UpperCAmelCase = self.tokenizer([''''''] , padding='''max_length''' , max_length=_lowercase , return_tensors='''pt''' ) __UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __UpperCAmelCase = uncond_embeddings.repeat_interleave(_lowercase , dim=0 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __UpperCAmelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __UpperCAmelCase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __UpperCAmelCase = torch.randn(_lowercase , generator=_lowercase , device='''cpu''' , dtype=_lowercase ).to( self.device ) else: __UpperCAmelCase = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) __UpperCAmelCase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __UpperCAmelCase = {} if accepts_eta: __UpperCAmelCase = eta # check if the scheduler accepts generator __UpperCAmelCase = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __UpperCAmelCase = generator with self.progress_bar(total=_lowercase ): for i, t in enumerate(_lowercase ): # expand the latents if we are doing classifier free guidance __UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __UpperCAmelCase = self.scheduler.scale_model_input(_lowercase , _lowercase ) # predict the noise residual __UpperCAmelCase = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample # perform classifier free guidance if do_classifier_free_guidance: __UpperCAmelCase , __UpperCAmelCase = noise_pred.chunk(2 ) __UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __UpperCAmelCase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __UpperCAmelCase , __UpperCAmelCase = self.cond_fn( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) # compute the previous noisy sample x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __UpperCAmelCase = 1 / 0.18_215 * latents __UpperCAmelCase = self.vae.decode(_lowercase ).sample __UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(_lowercase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
49
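The guided-diffusion chunk above calls two helpers, slerp and spherical_dist_loss, whose definitions fall outside the excerpt. For reference, a minimal sketch of both in the shape the call sites expect; the bodies follow the common CLIP-guided diffusion recipe and are illustrative, not this file's own definitions.

import numpy as np
import torch
import torch.nn.functional as F


def spherical_dist_loss(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Normalize both embeddings, then score the squared geodesic distance
    # between them on the unit hypersphere (the CLIP guidance loss term).
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    # Spherical linear interpolation between two embeddings; falls back to a
    # plain lerp when the inputs are nearly colinear and slerp is ill-conditioned.
    a = v0.detach().cpu().numpy()
    b = v1.detach().cpu().numpy()
    dot = np.sum(a * b) / (np.linalg.norm(a) * np.linalg.norm(b))
    if np.abs(dot) > dot_threshold:
        out = (1 - t) * a + t * b
    else:
        theta = np.arccos(dot)
        out = (np.sin((1 - t) * theta) * a + np.sin(t * theta) * b) / np.sin(theta)
    return torch.from_numpy(out).to(v0.device, dtype=v0.dtype)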
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ['PoolFormerFeatureExtractor'] _lowercase : Any = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
49
1
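The chunk above is pure import plumbing built on the _LazyModule pattern: every public name is declared up front, but the heavy submodules are only imported the first time one of those names is touched. A stripped-down sketch of the same idea using PEP 562's module-level __getattr__; it would live in a package __init__.py, and the module and class names are reused from the chunk purely for illustration.

import importlib

# Map each public name to the submodule that defines it; nothing below is
# imported until one of these names is actually requested.
_import_structure = {
    "configuration_poolformer": ["PoolFormerConfig"],
    "modeling_poolformer": ["PoolFormerModel"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name: str):
    # Called only when `name` is missing from the module's globals (PEP 562).
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")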
"""simple docstring""" import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _lowercase : Union[str, Any] = logging.getLogger(__name__) _lowercase : Optional[Any] = 'Hello world! cécé herlolip' _lowercase : str = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def lowercase__ ( snake_case_ :Any , snake_case_ :int ): __UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) __UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage ) __UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ ) original.eval() __UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) __UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs __UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) __UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __UpperCAmelCase = encoder_input_ids __UpperCAmelCase = decoder_input_ids __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical __UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = original.generator(snake_case_ ) __UpperCAmelCase = new_model( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = new_model.generator(snake_case_ ) __UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) _lowercase : List[str] = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
49
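The conversion script above closes with the standard checkpoint-conversion sanity check: run the original and the converted model on identical inputs and compare outputs elementwise before saving anything. That pattern is worth having in isolation; a minimal, self-contained sketch on two toy modules (the model classes and tolerance are placeholders, not the BertAbs ones).

import torch


def assert_models_match(model_a: torch.nn.Module, model_b: torch.nn.Module,
                        example_input: torch.Tensor, atol: float = 1e-3) -> None:
    # Put both models in eval mode so dropout / batch norm don't add noise.
    model_a.eval()
    model_b.eval()
    with torch.no_grad():
        out_a = model_a(example_input)
        out_b = model_b(example_input)
    max_diff = torch.max(torch.abs(out_a - out_b)).item()
    print(f"Maximum absolute difference between outputs: {max_diff:.2e}")
    if not torch.allclose(out_a, out_b, atol=atol):
        raise ValueError("outputs differ; the converted model likely diverges from the original")


# Usage: a fresh Linear and a copy that loaded its state dict must match.
a = torch.nn.Linear(4, 4)
b = torch.nn.Linear(4, 4)
b.load_state_dict(a.state_dict())
assert_models_match(a, b, torch.randn(2, 4))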
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
49
1
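The identifier scrambling in the chunk above (all four dfs parameters render as snake_case_, which is not even valid Python) hides a fairly standard algorithm: DFS-based articulation-point detection, where the DFS root is an articulation point iff it has more than one tree out-edge, and any other vertex is one iff some child's low-link cannot climb above it. A runnable reconstruction under that reading, with the chunk's own adjacency list as the test case:

def compute_ap(graph: dict[int, list[int]]) -> list[int]:
    n = len(graph)
    low = [0] * n          # low-link values, seeded with vertex ids
    visited = [False] * n
    is_art = [False] * n

    def dfs(root: int, at: int, parent: int, out_edge_count: int) -> int:
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # articulation point found via a bridge (<) or a cycle (==)
                if at <= low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            # the bridge/cycle tests don't apply to a DFS root:
            # it is an articulation point iff it has more than one out-edge
            is_art[i] = dfs(i, i, -1, 0) > 1
    return [v for v, flag in enumerate(is_art) if flag]


data = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
        5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
print(compute_ap(data))  # [2, 3, 5]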
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[Any] , snake_case_ :List[Any] ): __UpperCAmelCase = 1.5 __UpperCAmelCase = int(factor * num_class_images ) __UpperCAmelCase = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case_ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=snake_case_ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: __UpperCAmelCase = client.query(text=snake_case_ ) if len(snake_case_ ) >= factor * num_class_images or num_images > 1E4: break else: __UpperCAmelCase = int(factor * num_images ) __UpperCAmelCase = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case_ , aesthetic_weight=0.1 , ) __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = tqdm(desc='''downloading real regularization images''' , total=snake_case_ ) with open(F'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(F'''{class_data_dir}/urls.txt''' , '''w''' ) as fa, open( F'''{class_data_dir}/images.txt''' , '''w''' ) as fa: while total < num_class_images: __UpperCAmelCase = class_images[count] count += 1 try: __UpperCAmelCase = requests.get(images['''url'''] ) if img.status_code == 200: __UpperCAmelCase = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser('''''' , add_help=snake_case_ ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case_ , type=snake_case_ ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case_ , type=snake_case_ ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case_ ) return parser.parse_args() if __name__ == "__main__": _lowercase : Tuple = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
49
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "EncodecFeatureExtractor" a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ): super().__init__(_lowercase , _lowercase ) __UpperCAmelCase = self.feature_extractor __UpperCAmelCase = False def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase ) def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''text''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase ) if audio is not None: __UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: __UpperCAmelCase = audio_inputs['''padding_mask'''] return inputs def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(_lowercase , padding_mask=_lowercase ) else: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ): return self.tokenizer.decode(*_lowercase , **_lowercase ) def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ): __UpperCAmelCase = to_numpy(_lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape if padding_mask is None: return list(_lowercase ) __UpperCAmelCase = to_numpy(_lowercase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase = seq_len - padding_mask.shape[-1] __UpperCAmelCase = 1 - self.feature_extractor.padding_value __UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase ) __UpperCAmelCase = audio_values.tolist() for i in range(_lowercase ): __UpperCAmelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 ) return audio_values
49
1
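The _decode_audio helper above undoes batching: it right-pads the padding mask with the non-padding token so freshly generated samples are kept, then boolean-indexes each waveform down to its true length. The core trick isolated into a small numpy sketch (a padding value of 0 is assumed here for concreteness; the real code reads it from the feature extractor).

import numpy as np


def strip_padding(audio_values: np.ndarray, padding_mask: np.ndarray,
                  padding_value: int = 0) -> list:
    # audio_values: (batch, channels, seq_len); padding_mask: (batch, mask_len)
    bsz, channels, seq_len = audio_values.shape
    # Pad the mask on the right with the *non-padding* token so generated
    # samples beyond the original mask length are kept, not dropped.
    difference = seq_len - padding_mask.shape[-1]
    padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)),
                          "constant", constant_values=1 - padding_value)
    out = []
    for i in range(bsz):
        kept = audio_values[i][:, padding_mask[i] != padding_value]  # boolean column mask
        out.append(kept.reshape(channels, -1))
    return out


# Two mono waveforms, the second padded with two trailing samples:
batch = np.array([[[1.0, 2.0, 3.0, 4.0]], [[5.0, 6.0, 0.0, 0.0]]])
mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])
print([a.shape for a in strip_padding(batch, mask)])  # [(1, 4), (1, 2)]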
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _UpperCAmelCase ( unittest.TestCase ): def a ( self : str ): __UpperCAmelCase = inspect.getfile(accelerate.test_utils ) __UpperCAmelCase = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 __UpperCAmelCase = test_metrics @require_cpu def a ( self : Union[str, Any] ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def a ( self : Dict ): debug_launcher(self.test_metrics.main ) @require_single_gpu def a ( self : int ): self.test_metrics.main() @require_multi_gpu def a ( self : Tuple ): print(F'''Found {torch.cuda.device_count()} devices.''' ) __UpperCAmelCase = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowercase , env=os.environ.copy() )
49
"""simple docstring""" def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __UpperCAmelCase = True for i in range(snake_case_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __UpperCAmelCase = True if a[i].islower(): __UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
49
1
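The chunk above answers the classic abbreviation question: can string a be turned into string b by uppercasing some of its lowercase letters and deleting all remaining lowercase letters? Its table dp[i][j] records whether the first i characters of a can yield the first j characters of b. A commented reconstruction of the same forward DP with two worked cases (the function name and examples are mine):

def matches_abbreviation(word: str, abbr: str) -> bool:
    n, m = len(word), len(abbr)
    # dp[i][j]: word[:i] can produce abbr[:j]
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if not dp[i][j]:
                continue
            # uppercase word[i] to consume the next abbreviation letter
            if j < m and word[i].upper() == abbr[j]:
                dp[i + 1][j + 1] = True
            # or, if word[i] is lowercase, simply delete it
            if word[i].islower():
                dp[i + 1][j] = True
    return dp[n][m]


print(matches_abbreviation("daBcd", "ABC"))  # True: delete d, a->A, keep B, c->C, delete d
print(matches_abbreviation("dBcd", "ABC"))   # False: nothing can supply the leading A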
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks if the entire collection has been sorted if len(snake_case_ ) <= 1 or n <= 1: return insert_next(snake_case_ , n - 1 ) rec_insertion_sort(snake_case_ , n - 1 ) def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks order between adjacent elements if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase = ( collection[index], collection[index - 1], ) insert_next(snake_case_ , index + 1 ) if __name__ == "__main__": _lowercase : Any = input('Enter integers separated by spaces: ') _lowercase : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
49
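Reading order in the chunk above matters: rec_insertion_sort first lets insert_next sift the element at index n - 1 rightward until the tail is ordered, then recurses on the prefix of length n - 1, so the sorted suffix grows leftward. The same two routines as a self-contained, non-interactive sketch (the chunk's __main__ reads from stdin; the names here restore the obfuscated ones):

def rec_insertion_sort(collection: list, n: int) -> None:
    # Base case: empty/singleton list, or only one element left to place.
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)          # order the suffix starting at n - 1
    rec_insertion_sort(collection, n - 1)   # then sort the shorter prefix


def insert_next(collection: list, index: int) -> None:
    # Swap adjacent out-of-order elements rightward until order is restored.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)


nums = [5, 3, 1, 4, 2]
rec_insertion_sort(nums, len(nums))
print(nums)  # [1, 2, 3, 4, 5]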
"""simple docstring""" from collections import deque class _UpperCAmelCase : def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ): __UpperCAmelCase = process_name # process name __UpperCAmelCase = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __UpperCAmelCase = arrival_time __UpperCAmelCase = burst_time # remaining burst time __UpperCAmelCase = 0 # total time of the process wait in ready queue __UpperCAmelCase = 0 # time from arrival time to completion time class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ): # total number of mlfq's queues __UpperCAmelCase = number_of_queues # time slice of queues that round robin algorithm applied __UpperCAmelCase = time_slices # unfinished process is in this ready_queue __UpperCAmelCase = queue # current time __UpperCAmelCase = current_time # finished process is in this sequence queue __UpperCAmelCase = deque() def a ( self : Dict ): __UpperCAmelCase = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a ( self : str , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a ( self : Any , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a ( self : Tuple , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a ( self : Optional[int] , _lowercase : deque[Process] ): return [q.burst_time for q in queue] def a ( self : str , _lowercase : Process ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a ( self : Union[str, Any] , _lowercase : deque[Process] ): __UpperCAmelCase = deque() # sequence deque of finished process while len(_lowercase ) != 0: __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_lowercase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __UpperCAmelCase = 0 # set the process's turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # set the completion time __UpperCAmelCase = self.current_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ): __UpperCAmelCase = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_lowercase ) ): __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_lowercase ) # if the burst time of process is bigger than 
time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __UpperCAmelCase = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_lowercase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __UpperCAmelCase = 0 # set the finish time __UpperCAmelCase = self.current_time # update the process' turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a ( self : Union[str, Any] ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __UpperCAmelCase , __UpperCAmelCase = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _lowercase : List[str] = Process('P1', 0, 53) _lowercase : str = Process('P2', 0, 17) _lowercase : Union[str, Any] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : Any = 3 _lowercase : Union[str, Any] = [17, 25] _lowercase : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) _lowercase : Optional[Any] = Process('P1', 0, 53) _lowercase : Tuple = Process('P2', 0, 17) _lowercase : Optional[int] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : int = 3 _lowercase : int = [17, 25] _lowercase : List[str] = deque([Pa, Pa, Pa, Pa]) _lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0) _lowercase : str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( f"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( f"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
49
1
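The scheduler above composes two primitives: one round-robin pass per higher-priority queue, then plain FCFS on the last queue. The round-robin pass carries the preemption logic; a self-contained sketch of just that step over (name, remaining_burst) pairs (the Process bookkeeping fields are dropped for brevity):

from collections import deque


def round_robin_pass(ready: deque, time_slice: int, now: int):
    # One cycle over the ready queue: each process runs for at most
    # `time_slice` units; unfinished work rejoins the back of the queue.
    finished = []
    for _ in range(len(ready)):
        name, remaining = ready.popleft()
        if remaining > time_slice:
            now += time_slice
            ready.append((name, remaining - time_slice))   # preempted
        else:
            now += remaining
            finished.append((name, now))                   # (process, completion time)
    return finished, ready, now


queue = deque([("P1", 53), ("P2", 17), ("P3", 68), ("P4", 24)])
done, queue, clock = round_robin_pass(queue, 17, 0)
print(done)   # [('P2', 34)] -- only P2 fits inside one 17-unit slice
print(queue)  # P1, P3, P4 rejoin with reduced burst times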
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_lowerCAmelCase ) class _UpperCAmelCase ( _lowerCAmelCase ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization a__ : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} ) a__ : ClassVar[Features] = Features({"text": Value("string" )} ) a__ : ClassVar[Features] = Features({"summary": Value("string" )} ) a__ : str = "text" a__ : str = "summary" @property def a ( self : Dict ): return {self.text_column: "text", self.summary_column: "summary"}
49
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : List[Any] = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "camembert" def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = use_cache __UpperCAmelCase = classifier_dropout class _UpperCAmelCase ( _lowerCAmelCase ): @property def a ( self : Tuple ): if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
49
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : Optional[Any] = StableDiffusionXLImgaImgPipeline a__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} a__ : str = PipelineTesterMixin.required_optional_params - {"latents"} a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def a ( self : Union[str, Any] ): torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) __UpperCAmelCase = EulerDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , ) __UpperCAmelCase = CLIPTextModel(_lowercase ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_lowercase ) __UpperCAmelCase = CLIPTextModelWithProjection(_lowercase ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_lowercase ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def a ( self : List[Any] , _lowercase : List[str] , _lowercase : Any=0 ): __UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image / 2 + 0.5 if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''A painting of a 
squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def a ( self : str ): __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = StableDiffusionXLImgaImgPipeline(**_lowercase ) __UpperCAmelCase = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = sd_pipe(**_lowercase ).images __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a ( self : List[Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def a ( self : int ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def a ( self : Optional[int] ): pass def a ( self : int ): __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = StableDiffusionXLImgaImgPipeline(**_lowercase ) __UpperCAmelCase = sd_pipe.to(_lowercase ) __UpperCAmelCase = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) # forward without prompt embeds __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = 3 * ['''this is a negative prompt'''] __UpperCAmelCase = negative_prompt __UpperCAmelCase = 3 * [inputs['''prompt''']] __UpperCAmelCase = sd_pipe(**_lowercase ) __UpperCAmelCase = output.images[0, -3:, -3:, -1] # forward with prompt embeds __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = 3 * ['''this is a negative prompt'''] __UpperCAmelCase = 3 * [inputs.pop('''prompt''' )] ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) = sd_pipe.encode_prompt(_lowercase , negative_prompt=_lowercase ) __UpperCAmelCase = sd_pipe( **_lowercase , prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , pooled_prompt_embeds=_lowercase , negative_pooled_prompt_embeds=_lowercase , ) __UpperCAmelCase = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Tuple ): super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : int="cpu" , _lowercase : List[str]=torch.floataa , _lowercase : Any=0 ): __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) ) __UpperCAmelCase = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase ) __UpperCAmelCase = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a ( self : Tuple ): __UpperCAmelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ).images __UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert 
image.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
49
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks if the entire collection has been sorted if len(snake_case_ ) <= 1 or n <= 1: return insert_next(snake_case_ , n - 1 ) rec_insertion_sort(snake_case_ , n - 1 ) def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks order between adjacent elements if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase = ( collection[index], collection[index - 1], ) insert_next(snake_case_ , index + 1 ) if __name__ == "__main__": _lowercase : Any = input('Enter integers separated by spaces: ') _lowercase : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
49
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase : Union[str, Any] = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase : 
Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : Any = StableUnCLIPPipeline a__ : Dict = TEXT_TO_IMAGE_PARAMS a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ : Optional[int] = False def a ( self : List[str] ): __UpperCAmelCase = 32 __UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase ) __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL() __UpperCAmelCase = { # prior 
components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ): if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def a ( self : Any ): __UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=_lowercase ) def a ( self : int ): __UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowercase ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Any ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' ) __UpperCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase ) def a ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
49
1
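The final test above is a memory-budget assertion: with attention slicing and sequential CPU offload enabled, the whole run must stay under 7 GB of peak GPU memory. The measurement part generalizes to any callable; a small sketch (assumes a CUDA device is available; the commented usage line reuses names from the test purely for illustration):

import torch


def peak_gpu_memory(fn, *args, **kwargs):
    # Reset the CUDA peak-memory counter, run the callable, and report the
    # highest number of bytes allocated at any point during the call.
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    result = fn(*args, **kwargs)
    return result, torch.cuda.max_memory_allocated()


# _, peak = peak_gpu_memory(pipe, "anime turtle", num_inference_steps=2, output_type="np")
# assert peak < 7 * 10**9  # same 7 GB budget as the test above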
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} _lowercase : List[str] = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } _lowercase : List[str] = { 'allenai/longformer-base-4096': 40_96, 'allenai/longformer-large-4096': 40_96, 'allenai/longformer-large-4096-finetuned-triviaqa': 40_96, 'allenai/longformer-base-4096-extra.pos.embd.only': 40_96, 'allenai/longformer-large-4096-extra.pos.embd.only': 40_96, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowercase__ ( ): __UpperCAmelCase = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __UpperCAmelCase = bs[:] __UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = set() __UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase = char return pairs class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = VOCAB_FILES_NAMES a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["input_ids", "attention_mask"] def __init__( self : Any , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Optional[Any]="replace" , _lowercase : Optional[Any]="<s>" , _lowercase : List[str]="</s>" , _lowercase : str="</s>" , _lowercase : int="<s>" , _lowercase : Tuple="<unk>" , _lowercase : Tuple="<pad>" , _lowercase : Tuple="<mask>" , _lowercase : int=False , **_lowercase : Union[str, Any] , ): __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else 
bos_token __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token super().__init__( errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , ) with open(_lowercase , encoding='''utf-8''' ) as vocab_handle: __UpperCAmelCase = json.load(_lowercase ) __UpperCAmelCase = {v: k for k, v in self.encoder.items()} __UpperCAmelCase = errors # how to handle errors in decoding __UpperCAmelCase = bytes_to_unicode() __UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(_lowercase , encoding='''utf-8''' ) as merges_handle: __UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1] __UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) __UpperCAmelCase = {} __UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def a ( self : str ): return len(self.encoder ) def a ( self : List[Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def a ( self : str , _lowercase : Any ): if token in self.cache: return self.cache[token] __UpperCAmelCase = tuple(_lowercase ) __UpperCAmelCase = get_pairs(_lowercase ) if not pairs: return token while True: __UpperCAmelCase = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase = bigram __UpperCAmelCase = [] __UpperCAmelCase = 0 while i < len(_lowercase ): try: __UpperCAmelCase = word.index(_lowercase , _lowercase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase = j if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase = tuple(_lowercase ) __UpperCAmelCase = new_word if len(_lowercase ) == 1: break else: __UpperCAmelCase = get_pairs(_lowercase ) __UpperCAmelCase = ''' '''.join(_lowercase ) __UpperCAmelCase = word return word def a ( self : List[Any] , _lowercase : Optional[int] ): __UpperCAmelCase = [] for token in re.findall(self.pat , _lowercase ): __UpperCAmelCase = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for 
bpe_token in self.bpe(_lowercase ).split(''' ''' ) ) return bpe_tokens def a ( self : int , _lowercase : Union[str, Any] ): return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) ) def a ( self : Any , _lowercase : List[Any] ): return self.decoder.get(_lowercase ) def a ( self : Optional[int] , _lowercase : List[Any] ): __UpperCAmelCase = ''''''.join(_lowercase ) __UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not os.path.isdir(_lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '''\n''' ) __UpperCAmelCase = 0 with open(_lowercase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __UpperCAmelCase = token_index writer.write(''' '''.join(_lowercase ) + '''\n''' ) index += 1 return vocab_file, merge_file def a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] __UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase ) if token_ids_a is None: return [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1] def a ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self : Optional[int] , _lowercase : Tuple , _lowercase : List[str]=False , **_lowercase : Optional[int] ): __UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()): __UpperCAmelCase = ''' ''' + text return (text, kwargs)
"""simple docstring""" from typing import Any def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ): _validation( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Creates data structures and fill initial step __UpperCAmelCase = {} __UpperCAmelCase = {} for state in states_space: __UpperCAmelCase = observations_space[0] __UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case_ ) ): __UpperCAmelCase = observations_space[o] __UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state # Update probabilities and pointers dicts __UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __UpperCAmelCase = arg_max # The final observation __UpperCAmelCase = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state __UpperCAmelCase = arg_max # Process pointers backwards __UpperCAmelCase = last_state __UpperCAmelCase = [] for o in range(len(snake_case_ ) - 1 , -1 , -1 ): result.append(snake_case_ ) __UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_not_empty( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) _validate_lists(snake_case_ , snake_case_ ) _validate_dicts( snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any ): _validate_list(snake_case_ , '''observations_space''' ) _validate_list(snake_case_ , '''states_space''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list''' raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list of strings''' raise ValueError(snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ ) _validate_nested_dict(snake_case_ , '''transition_probabilities''' ) _validate_nested_dict(snake_case_ , '''emission_probabilities''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): _validate_dict(_object , snake_case_ , snake_case_ ) for x in 
_object.values(): _validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a dict''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ): __UpperCAmelCase = F'''{var_name} all keys must be strings''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ): __UpperCAmelCase = '''nested dictionary ''' if nested else '''''' __UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
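# Illustrative example (not from the original module): the classic two-state
# "Healthy/Fever" HMM; every name and number below is invented for the demo.
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     initial = {"Healthy": 0.6, "Fever": 0.4}
#     transition = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emission = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, initial, transition, emission)
#     # -> ["Healthy", "Healthy", "Fever"]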
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( _lowerCAmelCase ): a__ : List[Any] = (EulerDiscreteScheduler,) a__ : Dict = 10 def a ( self : List[Any] , **_lowercase : str ): __UpperCAmelCase = { '''num_train_timesteps''': 11_00, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**_lowercase ) return config def a ( self : Union[str, Any] ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowercase ) def a ( self : Optional[Any] ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase ) def a ( self : Tuple ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowercase ) def a ( self : List[Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps ) __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __UpperCAmelCase = sample.to(_lowercase ) for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = scheduler.scale_model_input(_lowercase , _lowercase ) __UpperCAmelCase = model(_lowercase , _lowercase ) __UpperCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) __UpperCAmelCase = output.prev_sample __UpperCAmelCase = torch.sum(torch.abs(_lowercase ) ) __UpperCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def a ( self : Optional[Any] ): __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) __UpperCAmelCase = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps ) __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __UpperCAmelCase = sample.to(_lowercase ) for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = scheduler.scale_model_input(_lowercase , _lowercase ) __UpperCAmelCase = model(_lowercase , _lowercase ) __UpperCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) __UpperCAmelCase = output.prev_sample __UpperCAmelCase = torch.sum(torch.abs(_lowercase ) ) __UpperCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3 def a ( self : int ): __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowercase ) __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __UpperCAmelCase = sample.to(_lowercase ) for t in scheduler.timesteps: __UpperCAmelCase = scheduler.scale_model_input(_lowercase , _lowercase ) __UpperCAmelCase = model(_lowercase , 
_lowercase ) __UpperCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) __UpperCAmelCase = output.prev_sample __UpperCAmelCase = torch.sum(torch.abs(_lowercase ) ) __UpperCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def a ( self : Dict ): __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**_lowercase , use_karras_sigmas=_lowercase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowercase ) __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() __UpperCAmelCase = sample.to(_lowercase ) for t in scheduler.timesteps: __UpperCAmelCase = scheduler.scale_model_input(_lowercase , _lowercase ) __UpperCAmelCase = model(_lowercase , _lowercase ) __UpperCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ) __UpperCAmelCase = output.prev_sample __UpperCAmelCase = torch.sum(torch.abs(_lowercase ) ) __UpperCAmelCase = torch.mean(torch.abs(_lowercase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowercase : int = logging.get_logger(__name__) _lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _lowercase : str = { 'vocab_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json' ), }, } _lowercase : int = { 'yjernite/retribert-base-uncased': 5_12, } _lowercase : Any = { 'yjernite/retribert-base-uncased': {'do_lower_case': True}, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = PRETRAINED_INIT_CONFIGURATION a__ : Optional[Any] = RetriBertTokenizer a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ): super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars ): __UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = strip_accents __UpperCAmelCase = tokenize_chinese_chars __UpperCAmelCase = normalizer_class(**_lowercase ) __UpperCAmelCase = do_lower_case def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ): __UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase )
"""simple docstring""" from collections import defaultdict def lowercase__ ( snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = True for v in tree[start]: if v not in visited: ret += dfs(snake_case_ ) if ret % 2 == 0: cuts.append(snake_case_ ) return ret def lowercase__ ( ): dfs(1 ) if __name__ == "__main__": _lowercase ,_lowercase : Union[str, Any] = 10, 9 _lowercase : Union[str, Any] = defaultdict(list) _lowercase : dict[int, bool] = {} _lowercase : list[int] = [] _lowercase : Tuple = 0 _lowercase : Optional[Any] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _lowercase : Dict = 'bart' _lowercase : Dict = True @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __UpperCAmelCase = qar_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = (None, None) if MODEL_TYPE == "bart": __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __UpperCAmelCase = sas_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = faiss.StandardGpuResources() __UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __UpperCAmelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) __UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ ) wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU else: __UpperCAmelCase , __UpperCAmelCase = (None, None) __UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): __UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __UpperCAmelCase = elia['''train_eli5'''] __UpperCAmelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(snake_case_ ) return (elia_train, eli5_train_q_index) _lowercase ,_lowercase ,_lowercase : Dict = load_indexes() _lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models() _lowercase ,_lowercase : Tuple = load_train_data() def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ): __UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ ) __UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]] return nn_examples def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ): if source == "none": __UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index( snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ ) else: __UpperCAmelCase , __UpperCAmelCase = query_es_index( snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , ) __UpperCAmelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda snake_case_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None), } ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ): with torch.no_grad(): __UpperCAmelCase = qa_sas_generate( snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar _lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' _lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) _lowercase : str = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] _lowercase : Optional[int] = st.sidebar.checkbox('Demo options') if demo_options: _lowercase : Tuple = st.sidebar.selectbox( '', action_list, index=3, ) _lowercase : List[str] = action_list.index(action_st) _lowercase : str = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) _lowercase : int = show_type == 'Show full text of passages' else: _lowercase : str = 3 _lowercase : List[Any] = True _lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options') if retrieval_options: _lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) _lowercase : 
Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: _lowercase : List[str] = 'wiki40b' _lowercase : Optional[int] = 'dense' _lowercase : List[Any] = 'beam' _lowercase : str = 2 _lowercase : Optional[int] = 64 _lowercase : Union[str, Any] = 2_56 _lowercase : List[str] = None _lowercase : Optional[int] = None _lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options') if generate_options: _lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) _lowercase : Optional[int] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) _lowercase : Optional[Any] = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": _lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _lowercase : List[Any] = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) _lowercase : Dict = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) _lowercase : Union[str, Any] = None # start main text _lowercase : Optional[int] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] _lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": _lowercase : Optional[Any] = st.text_input('Enter your question here:', '') else: _lowercase : int = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": _lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10) _lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) _lowercase : Dict = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _lowercase : Any = support_list[:10] _lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: _lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: _lowercase ,_lowercase : Union[str, Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): _lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) _lowercase : Any = res[1].strip() if sec_titles == "": _lowercase : Dict = '[{}]({})'.format(res[0], wiki_url) else: _lowercase : List[Any] = sec_titles.split(' & ') _lowercase : int = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: _lowercase : List[Any] = find_nearest_training(question) _lowercase : Tuple = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) _lowercase : int = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) _lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": _lowercase : Dict = pd.read_csv('sample_data.csv', header=None) _lowercase : Optional[Any] = df.shape[:1][0] # If you're using some other dataset input the target column _lowercase : Optional[int] = df.iloc[:, 1:2] _lowercase : Optional[int] = actual_data.values.reshape(len_data, 1) _lowercase : str = MinMaxScaler().fit_transform(actual_data) _lowercase : Optional[int] = 10 _lowercase : Any = 5 _lowercase : Union[str, Any] = 20 _lowercase : Union[str, Any] = len_data - periods * look_back _lowercase : int = actual_data[:division] _lowercase : Optional[Any] = actual_data[division - look_back :] _lowercase ,_lowercase : List[str] = [], [] _lowercase ,_lowercase : Any = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) _lowercase : Optional[int] = np.array(train_x) _lowercase : Any = np.array(test_x) _lowercase : Union[str, Any] = np.array([list(i.ravel()) for i in train_y]) _lowercase : Tuple = np.array([list(i.ravel()) for i in test_y]) _lowercase : int = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss='mean_squared_error', optimizer='adam') _lowercase : Any = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) _lowercase : Optional[Any] = model.predict(x_test)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[str] = CycleDiffusionPipeline a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def a ( self : Optional[int] ): torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __UpperCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __UpperCAmelCase = CLIPTextModel(_lowercase ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ): __UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image / 2 + 0.5 if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def a ( self : Optional[int] ): __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = 
CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_dummy_components() for name, module in components.items(): if hasattr(_lowercase , '''half''' ): __UpperCAmelCase = module.half() __UpperCAmelCase = CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def a ( self : Tuple ): return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def a ( self : List[str] ): return super().test_inference_batch_single_identical() @skip_mps def a ( self : int ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def a ( self : str ): return super().test_save_load_optional_components() @skip_mps def a ( self : int ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : int ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained( _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def a ( self : Optional[Any] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images assert np.abs(image - expected_image ).max() < 2E-2
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( _lowerCAmelCase ): a__ : torch.FloatTensor class _UpperCAmelCase ( nn.Module ): def __init__( self : str , _lowercase : Optional[Any]=3 , _lowercase : List[Any]=3 , _lowercase : Dict=("DownEncoderBlock2D",) , _lowercase : Dict=(64,) , _lowercase : Union[str, Any]=2 , _lowercase : List[str]=32 , _lowercase : List[str]="silu" , _lowercase : Optional[int]=True , ): super().__init__() __UpperCAmelCase = layers_per_block __UpperCAmelCase = torch.nn.Convad( _lowercase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) __UpperCAmelCase = None __UpperCAmelCase = nn.ModuleList([] ) # down __UpperCAmelCase = block_out_channels[0] for i, down_block_type in enumerate(_lowercase ): __UpperCAmelCase = output_channel __UpperCAmelCase = block_out_channels[i] __UpperCAmelCase = i == len(_lowercase ) - 1 __UpperCAmelCase = get_down_block( _lowercase , num_layers=self.layers_per_block , in_channels=_lowercase , out_channels=_lowercase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , ) self.down_blocks.append(_lowercase ) # mid __UpperCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , ) # out __UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowercase , eps=1E-6 ) __UpperCAmelCase = nn.SiLU() __UpperCAmelCase = 2 * out_channels if double_z else out_channels __UpperCAmelCase = nn.Convad(block_out_channels[-1] , _lowercase , 3 , padding=1 ) __UpperCAmelCase = False def a ( self : List[str] , _lowercase : Optional[Any] ): __UpperCAmelCase = x __UpperCAmelCase = self.conv_in(_lowercase ) if self.training and self.gradient_checkpointing: def create_custom_forward(_lowercase : Dict ): def custom_forward(*_lowercase : Any ): return module(*_lowercase ) return custom_forward # down if is_torch_version('''>=''' , '''1.11.0''' ): for down_block in self.down_blocks: __UpperCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(_lowercase ) , _lowercase , use_reentrant=_lowercase ) # middle __UpperCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _lowercase , use_reentrant=_lowercase ) else: for down_block in self.down_blocks: __UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase ) , _lowercase ) # middle __UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _lowercase ) else: # down for down_block in self.down_blocks: __UpperCAmelCase = down_block(_lowercase ) # middle __UpperCAmelCase = self.mid_block(_lowercase ) # post-process __UpperCAmelCase = self.conv_norm_out(_lowercase ) __UpperCAmelCase = self.conv_act(_lowercase ) __UpperCAmelCase = self.conv_out(_lowercase ) return sample class _UpperCAmelCase ( nn.Module ): def __init__( self : Dict , _lowercase : Any=3 , _lowercase : Optional[Any]=3 , _lowercase : List[str]=("UpDecoderBlock2D",) , 
_lowercase : List[str]=(64,) , _lowercase : Optional[int]=2 , _lowercase : Optional[Any]=32 , _lowercase : Optional[Any]="silu" , _lowercase : List[Any]="group" , ): super().__init__() __UpperCAmelCase = layers_per_block __UpperCAmelCase = nn.Convad( _lowercase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) __UpperCAmelCase = None __UpperCAmelCase = nn.ModuleList([] ) __UpperCAmelCase = in_channels if norm_type == '''spatial''' else None # mid __UpperCAmelCase = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , ) # up __UpperCAmelCase = list(reversed(_lowercase ) ) __UpperCAmelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(_lowercase ): __UpperCAmelCase = output_channel __UpperCAmelCase = reversed_block_out_channels[i] __UpperCAmelCase = i == len(_lowercase ) - 1 __UpperCAmelCase = get_up_block( _lowercase , num_layers=self.layers_per_block + 1 , in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , resnet_time_scale_shift=_lowercase , ) self.up_blocks.append(_lowercase ) __UpperCAmelCase = output_channel # out if norm_type == "spatial": __UpperCAmelCase = SpatialNorm(block_out_channels[0] , _lowercase ) else: __UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowercase , eps=1E-6 ) __UpperCAmelCase = nn.SiLU() __UpperCAmelCase = nn.Convad(block_out_channels[0] , _lowercase , 3 , padding=1 ) __UpperCAmelCase = False def a ( self : Union[str, Any] , _lowercase : Any , _lowercase : Optional[Any]=None ): __UpperCAmelCase = z __UpperCAmelCase = self.conv_in(_lowercase ) __UpperCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(_lowercase : Optional[Any] ): def custom_forward(*_lowercase : Optional[int] ): return module(*_lowercase ) return custom_forward if is_torch_version('''>=''' , '''1.11.0''' ): # middle __UpperCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _lowercase , _lowercase , use_reentrant=_lowercase ) __UpperCAmelCase = sample.to(_lowercase ) # up for up_block in self.up_blocks: __UpperCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(_lowercase ) , _lowercase , _lowercase , use_reentrant=_lowercase ) else: # middle __UpperCAmelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , _lowercase , _lowercase ) __UpperCAmelCase = sample.to(_lowercase ) # up for up_block in self.up_blocks: __UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase ) , _lowercase , _lowercase ) else: # middle __UpperCAmelCase = self.mid_block(_lowercase , _lowercase ) __UpperCAmelCase = sample.to(_lowercase ) # up for up_block in self.up_blocks: __UpperCAmelCase = up_block(_lowercase , _lowercase ) # post-process if latent_embeds is None: __UpperCAmelCase = self.conv_norm_out(_lowercase ) else: __UpperCAmelCase = self.conv_norm_out(_lowercase , _lowercase ) __UpperCAmelCase = self.conv_act(_lowercase ) __UpperCAmelCase = self.conv_out(_lowercase ) return sample class _UpperCAmelCase ( 
nn.Module ): def __init__( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Any , _lowercase : Union[str, Any]=None , _lowercase : Optional[Any]="random" , _lowercase : List[str]=False , _lowercase : Optional[Any]=True ): super().__init__() __UpperCAmelCase = n_e __UpperCAmelCase = vq_embed_dim __UpperCAmelCase = beta __UpperCAmelCase = legacy __UpperCAmelCase = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) __UpperCAmelCase = remap if self.remap is not None: self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) ) __UpperCAmelCase = self.used.shape[0] __UpperCAmelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __UpperCAmelCase = self.re_embed __UpperCAmelCase = self.re_embed + 1 print( F'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' F'''Using {self.unknown_index} for unknown indices.''' ) else: __UpperCAmelCase = n_e __UpperCAmelCase = sane_index_shape def a ( self : Union[str, Any] , _lowercase : str ): __UpperCAmelCase = inds.shape assert len(_lowercase ) > 1 __UpperCAmelCase = inds.reshape(ishape[0] , -1 ) __UpperCAmelCase = self.used.to(_lowercase ) __UpperCAmelCase = (inds[:, :, None] == used[None, None, ...]).long() __UpperCAmelCase = match.argmax(-1 ) __UpperCAmelCase = match.sum(2 ) < 1 if self.unknown_index == "random": __UpperCAmelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: __UpperCAmelCase = self.unknown_index return new.reshape(_lowercase ) def a ( self : Any , _lowercase : Union[str, Any] ): __UpperCAmelCase = inds.shape assert len(_lowercase ) > 1 __UpperCAmelCase = inds.reshape(ishape[0] , -1 ) __UpperCAmelCase = self.used.to(_lowercase ) if self.re_embed > self.used.shape[0]: # extra token __UpperCAmelCase = 0 # simply set to zero __UpperCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowercase ) return back.reshape(_lowercase ) def a ( self : List[Any] , _lowercase : List[str] ): # reshape z -> (batch, height, width, channel) and flatten __UpperCAmelCase = z.permute(0 , 2 , 3 , 1 ).contiguous() __UpperCAmelCase = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __UpperCAmelCase = torch.argmin(torch.cdist(_lowercase , self.embedding.weight ) , dim=1 ) __UpperCAmelCase = self.embedding(_lowercase ).view(z.shape ) __UpperCAmelCase = None __UpperCAmelCase = None # compute loss for embedding if not self.legacy: __UpperCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __UpperCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __UpperCAmelCase = z + (z_q - z).detach() # reshape back to match original input shape __UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: __UpperCAmelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis __UpperCAmelCase = self.remap_to_used(_lowercase ) __UpperCAmelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: __UpperCAmelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def a ( self : Union[str, Any] , _lowercase : Dict , _lowercase : List[Any] ): # shape specifying (batch, height, width, channel) if self.remap is not None: __UpperCAmelCase = 
indices.reshape(shape[0] , -1 ) # add batch axis __UpperCAmelCase = self.unmap_to_all(_lowercase ) __UpperCAmelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __UpperCAmelCase = self.embedding(_lowercase ) if shape is not None: __UpperCAmelCase = z_q.view(_lowercase ) # reshape back to match original input shape __UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Dict=False ): __UpperCAmelCase = parameters __UpperCAmelCase , __UpperCAmelCase = torch.chunk(_lowercase , 2 , dim=1 ) __UpperCAmelCase = torch.clamp(self.logvar , -30.0 , 20.0 ) __UpperCAmelCase = deterministic __UpperCAmelCase = torch.exp(0.5 * self.logvar ) __UpperCAmelCase = torch.exp(self.logvar ) if self.deterministic: __UpperCAmelCase = __UpperCAmelCase = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def a ( self : Union[str, Any] , _lowercase : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype __UpperCAmelCase = randn_tensor( self.mean.shape , generator=_lowercase , device=self.parameters.device , dtype=self.parameters.dtype ) __UpperCAmelCase = self.mean + self.std * sample return x def a ( self : Optional[int] , _lowercase : Tuple=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def a ( self : Dict , _lowercase : int , _lowercase : str=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) __UpperCAmelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_lowercase ) def a ( self : List[str] ): return self.mean
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'} _lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _lowercase : List[str] = { 'google/rembert': 2_56, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Union[str, Any] = VOCAB_FILES_NAMES a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ): super().__init__( do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = remove_space __UpperCAmelCase = keep_accents __UpperCAmelCase = vocab_file __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(_lowercase ) @property def a ( self : int ): return len(self.sp_model ) def a ( self : Tuple ): __UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): __UpperCAmelCase = self.__dict__.copy() __UpperCAmelCase = None return state def __setstate__( self : Tuple , _lowercase : str ): __UpperCAmelCase = d __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ): __UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase ) return pieces def a ( self : int , _lowercase : List[str] ): return self.sp_model.PieceToId(_lowercase ) def a ( self : List[str] , _lowercase : str ): return self.sp_model.IdToPiece(_lowercase ) def a ( self : Any , _lowercase : Dict ): __UpperCAmelCase = self.sp_model.decode_pieces(_lowercase ) return out_string def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1] def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] 
__UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not os.path.isdir(_lowercase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) ) return __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ): copyfile(self.vocab_file , _lowercase ) return (out_vocab_file,)
"""simple docstring""" import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _lowercase : Dict = False try: _lowercase : Union[str, Any] = _is_package_available('google.colab') except ModuleNotFoundError: pass @input.register class _UpperCAmelCase : def __init__( self : Optional[int] , _lowercase : str = None , _lowercase : list = [] ): __UpperCAmelCase = 0 __UpperCAmelCase = choices __UpperCAmelCase = prompt if sys.platform == "win32": __UpperCAmelCase = '''*''' else: __UpperCAmelCase = '''➔ ''' def a ( self : Dict , _lowercase : Dict , _lowercase : str = "" ): if sys.platform != "win32": writeColor(self.choices[index] , 32 , _lowercase ) else: forceWrite(self.choices[index] , _lowercase ) def a ( self : List[str] , _lowercase : int ): if index == self.position: forceWrite(F''' {self.arrow_char} ''' ) self.write_choice(_lowercase ) else: forceWrite(F''' {self.choices[index]}''' ) reset_cursor() def a ( self : List[Any] , _lowercase : Direction , _lowercase : int = 1 ): __UpperCAmelCase = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(_lowercase ) move_cursor(_lowercase , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP['''up'''] ) def a ( self : Any ): self.move_direction(Direction.UP ) @input.mark(KEYMAP['''down'''] ) def a ( self : Any ): self.move_direction(Direction.DOWN ) @input.mark(KEYMAP['''newline'''] ) def a ( self : str ): move_cursor(len(self.choices ) - self.position , '''DOWN''' ) return self.position @input.mark(KEYMAP['''interrupt'''] ) def a ( self : Optional[Any] ): move_cursor(len(self.choices ) - self.position , '''DOWN''' ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(_lowercase )] for number in range(10 )] ) def a ( self : Tuple ): __UpperCAmelCase = int(chr(self.current_selection ) ) __UpperCAmelCase = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , _lowercase ) else: return else: return def a ( self : str , _lowercase : int = 0 ): if self.prompt: linebreak() forceWrite(self.prompt , '''\n''' ) if in_colab: forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' ) else: forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' ) __UpperCAmelCase = default_choice for i in range(len(self.choices ) ): self.print_choice(_lowercase ) forceWrite('''\n''' ) move_cursor(len(self.choices ) - self.position , '''UP''' ) with cursor.hide(): while True: if in_colab: try: __UpperCAmelCase = int(builtins.input() ) except ValueError: __UpperCAmelCase = default_choice else: __UpperCAmelCase = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , '''UP''' ) clear_line() self.write_choice(_lowercase , '''\n''' ) return choice
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : List[Any] = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] ): return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def lowercase__ ( snake_case_ :str , snake_case_ :Dict=0 ): return sorted(snake_case_ , key=lambda snake_case_ : x[column] ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :Optional[int] , snake_case_ :Optional[Any]=float('''inf''' ) ): for i in range(points_counts - 1 ): for j in range(i + 1 , snake_case_ ): __UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCAmelCase = current_dis return min_dis def lowercase__ ( snake_case_ :Dict , snake_case_ :List[str] , snake_case_ :Any=float('''inf''' ) ): for i in range(min(6 , points_counts - 1 ) , snake_case_ ): for j in range(max(0 , i - 6 ) , snake_case_ ): __UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCAmelCase = current_dis return min_dis def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :List[str] , snake_case_ :Dict ): # base case if points_counts <= 3: return dis_between_closest_pair(snake_case_ , snake_case_ ) # recursion __UpperCAmelCase = points_counts // 2 __UpperCAmelCase = closest_pair_of_points_sqr( snake_case_ , points_sorted_on_y[:mid] , snake_case_ ) __UpperCAmelCase = closest_pair_of_points_sqr( snake_case_ , points_sorted_on_y[mid:] , points_counts - mid ) __UpperCAmelCase = min(snake_case_ , snake_case_ ) __UpperCAmelCase = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(snake_case_ ) __UpperCAmelCase = dis_between_closest_in_strip( snake_case_ , len(snake_case_ ) , snake_case_ ) return min(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :Any ): __UpperCAmelCase = column_based_sort(snake_case_ , column=0 ) __UpperCAmelCase = column_based_sort(snake_case_ , column=1 ) return ( closest_pair_of_points_sqr( snake_case_ , snake_case_ , snake_case_ ) ) ** 0.5 if __name__ == "__main__": _lowercase : Optional[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print('Distance:', closest_pair_of_points(points, len(points)))
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _lowercase : List[Any] = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase__ ( snake_case_ :Union[str, Any] ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase__ ( snake_case_ :int , snake_case_ :Dict ): if args.student_type == "roberta": __UpperCAmelCase = False elif args.student_type == "gpt2": __UpperCAmelCase = False def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ): if args.student_type == "roberta": __UpperCAmelCase = False def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax 
temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of passes over the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' ) __UpperCAmelCase = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite''' ''' it. Use `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __UpperCAmelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ ) __UpperCAmelCase = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) __UpperCAmelCase = special_tok_ids __UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with 
open(args.token_counts , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) __UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __UpperCAmelCase = 0.0 # do not predict special tokens __UpperCAmelCase = torch.from_numpy(snake_case_ ) else: __UpperCAmelCase = None __UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) __UpperCAmelCase = student_config_class.from_pretrained(args.student_config ) __UpperCAmelCase = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: __UpperCAmelCase = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # __UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __UpperCAmelCase = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
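Since the script is driven entirely by argparse, a run boils down to one command line. Below is a hypothetical invocation consistent with the flag definitions and sanity checks above; every path is a placeholder and `train.py` is an assumed script name. Note that `--mlm` requires `--alpha_mlm > 0` together with `--alpha_clm 0.0`, plus an existing `--token_counts` file.

import subprocess
import sys

subprocess.run(
    [
        sys.executable, "train.py",                               # assumed script name
        "--force",
        "--dump_path", "serialization_dir/my_distillation",      # placeholder
        "--data_file", "data/binarized_text.pickle",              # placeholder
        "--token_counts", "data/token_counts.pickle",             # placeholder
        "--student_type", "distilbert",
        "--student_config", "training_configs/distilbert.json",   # placeholder
        "--teacher_type", "bert",
        "--teacher_name", "bert-base-uncased",
        "--mlm",
        "--alpha_mlm", "0.5",   # MLM run: alpha_mlm must be > 0 ...
        "--alpha_clm", "0.0",   # ... and alpha_clm must be 0
    ],
    check=True,
)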
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase : Optional[int] = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys _lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _lowercase : Union[str, Any] = logging.getLogger(__name__) _lowercase : Optional[Any] = 'Hello world! cécé herlolip' _lowercase : str = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def lowercase__ ( snake_case_ :Any , snake_case_ :int ): __UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) __UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage ) __UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ ) original.eval() __UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) __UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs __UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) __UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __UpperCAmelCase = encoder_input_ids __UpperCAmelCase = decoder_input_ids __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical __UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = original.generator(snake_case_ ) __UpperCAmelCase = new_model( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = new_model.generator(snake_case_ ) __UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) _lowercase : List[str] = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
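The heart of this conversion script is the "copy weights, then compare outputs" pattern. A generic, standalone sketch of that check, assuming both models return a single tensor for the same positional inputs (the script above unpacks `[0]` for exactly that reason):

import torch

def outputs_match(model_a, model_b, example_inputs, atol=1e-3):
    # Run both models on identical inputs and compare the largest deviation.
    model_a.eval()
    model_b.eval()
    with torch.no_grad():
        out_a = model_a(*example_inputs)
        out_b = model_b(*example_inputs)
    max_diff = torch.max(torch.abs(out_a - out_b)).item()
    print(f"Maximum absolute difference between outputs: {max_diff:.2e}")
    return torch.allclose(out_a, out_b, atol=atol)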
"""simple docstring""" def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] ): print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(snake_case_ ): for j in range(snake_case_ ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def lowercase__ ( snake_case_ :int , snake_case_ :Any ): __UpperCAmelCase = [[float('''inf''' ) for _ in range(snake_case_ )] for _ in range(snake_case_ )] for i in range(snake_case_ ): for j in range(snake_case_ ): __UpperCAmelCase = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(snake_case_ ): # looping through rows of graph array for i in range(snake_case_ ): # looping through columns of graph array for j in range(snake_case_ ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): __UpperCAmelCase = dist[i][k] + dist[k][j] _print_dist(snake_case_ , snake_case_ ) return dist, v if __name__ == "__main__": _lowercase : str = int(input('Enter number of vertices: ')) _lowercase : Any = int(input('Enter number of edges: ')) _lowercase : Dict = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): _lowercase : Any = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) _lowercase : Any = int(input('Enter source:')) _lowercase : str = int(input('Enter destination:')) _lowercase : Tuple = float(input('Enter weight:')) _lowercase : int = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def a ( self : List[str] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self : Dict ): __UpperCAmelCase = ort.SessionOptions() __UpperCAmelCase = False return options def a ( self : Any ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a ( self : Optional[int] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-3
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def lowercase__ ( snake_case_ :Dict ): __UpperCAmelCase = {} __UpperCAmelCase = job['''started_at'''] __UpperCAmelCase = job['''completed_at'''] __UpperCAmelCase = date_parser.parse(snake_case_ ) __UpperCAmelCase = date_parser.parse(snake_case_ ) __UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) __UpperCAmelCase = start __UpperCAmelCase = end __UpperCAmelCase = duration_in_min return job_info def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any]=None ): __UpperCAmelCase = None if token is not None: __UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''} __UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __UpperCAmelCase = requests.get(snake_case_ , headers=snake_case_ ).json() __UpperCAmelCase = {} try: job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} ) __UpperCAmelCase = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(snake_case_ ): __UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=snake_case_ ).json() job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": _lowercase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') _lowercase : Tuple = parser.parse_args() _lowercase : List[str] = get_job_time(args.workflow_run_id) _lowercase : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f"""{k}: {v["duration"]}""")
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__ ( snake_case_ :Dict , snake_case_ :int ): assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} __UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} __UpperCAmelCase = features.copy() __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} 
) if features is not None else None ) __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ): if issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = jsonl_path elif issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = [jsonl_path] __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ): assert isinstance(snake_case_ , snake_case_ ) for split in splits: __UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read() 
_check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ): if split: __UpperCAmelCase = {split: jsonl_path} else: __UpperCAmelCase = '''train''' __UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path} __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__ ( snake_case_ :Optional[int] ): return json.load(snake_case_ ) def lowercase__ ( snake_case_ :Any ): return [json.loads(snake_case_ ) for line in buffer] class _UpperCAmelCase : @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, 
'''data'''), ] , ) def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 def a ( self : int , _lowercase : Any ): with pytest.raises(_lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ): __UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' __UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() assert exported_content == original_content
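Condensed, the writer round-trip these tests exercise looks like this: serialize a `Dataset` to an in-memory JSON-lines buffer, then parse it back. `JsonDatasetWriter` is the internal class imported at the top of the test file, so this sketch relies on datasets' internals rather than public API.

import io
import json

from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()
    buffer.seek(0)
    rows = [json.loads(line) for line in buffer]
assert rows[0] == {"col_1": "a", "col_2": 1}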
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "EncodecFeatureExtractor" a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ): super().__init__(_lowercase , _lowercase ) __UpperCAmelCase = self.feature_extractor __UpperCAmelCase = False def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase ) def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''text''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase ) if audio is not None: __UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: __UpperCAmelCase = audio_inputs['''padding_mask'''] return inputs def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(_lowercase , padding_mask=_lowercase ) else: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ): return self.tokenizer.decode(*_lowercase , **_lowercase ) def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ): __UpperCAmelCase = to_numpy(_lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape if padding_mask is None: return list(_lowercase ) __UpperCAmelCase = to_numpy(_lowercase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase = seq_len - padding_mask.shape[-1] __UpperCAmelCase = 1 - self.feature_extractor.padding_value __UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase ) __UpperCAmelCase = audio_values.tolist() for i in range(_lowercase ): __UpperCAmelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 ) return audio_values
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase = TextIteratorStreamer(_lowercase ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowercase , _lowercase ) def a ( self : str ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Tuple ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase ) model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase = cs.out[:-1] # Remove the final "\n" __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : Tuple ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowercase ): __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text
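Outside a test, the iterator streamer is used exactly the way these tests set it up: run `generate` on a background thread and consume decoded text as it arrives. A minimal sketch with a small public checkpoint:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

inputs = tokenizer(["The quick brown"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for piece in streamer:  # yields decoded text chunks as they are generated
    print(piece, end="", flush=True)
thread.join()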
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase : Optional[int] = 16 _lowercase : Optional[Any] = 32 def lowercase__ ( snake_case_ :Accelerator , snake_case_ :int = 16 , snake_case_ :str = "bert-base-cased" ): __UpperCAmelCase = AutoTokenizer.from_pretrained(snake_case_ ) __UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ :Optional[int] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCAmelCase = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ :Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Optional[int] , snake_case_ :Tuple , snake_case_ :Optional[int] ): model.eval() __UpperCAmelCase = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase = model(**snake_case_ ) __UpperCAmelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCAmelCase , __UpperCAmelCase = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __UpperCAmelCase = metric.compute() return eval_metric["accuracy"] def lowercase__ ( snake_case_ :int , snake_case_ :Dict ): # Initialize accelerator __UpperCAmelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCAmelCase = config['''lr'''] __UpperCAmelCase = int(config['''num_epochs'''] ) __UpperCAmelCase = int(config['''seed'''] ) __UpperCAmelCase = int(config['''batch_size'''] ) __UpperCAmelCase = args.model_name_or_path set_seed(snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __UpperCAmelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCAmelCase = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __UpperCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __UpperCAmelCase = 1 __UpperCAmelCase = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __UpperCAmelCase = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over __UpperCAmelCase = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCAmelCase = 0 __UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) __UpperCAmelCase = num_epochs if args.partial_train_epoch is not None: __UpperCAmelCase = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __UpperCAmelCase = args.resume_from_checkpoint.split('''epoch_''' )[1] __UpperCAmelCase = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __UpperCAmelCase = int(snake_case_ ) + 1 __UpperCAmelCase = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.print('''resumed checkpoint performance:''' , snake_case_ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f: __UpperCAmelCase = json.load(snake_case_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __UpperCAmelCase = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): __UpperCAmelCase = model(**snake_case_ ) __UpperCAmelCase = outputs.loss __UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __UpperCAmelCase = F'''epoch_{epoch}''' __UpperCAmelCase = os.path.join(args.output_dir , snake_case_ ) accelerator.save_state(snake_case_ ) __UpperCAmelCase = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = accuracy __UpperCAmelCase = lr_scheduler.get_lr()[0] __UpperCAmelCase = optimizer.param_groups[0]['''lr'''] __UpperCAmelCase = epoch __UpperCAmelCase = overall_step accelerator.print(F'''epoch {epoch}:''' , snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
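# A minimal sketch (hypothetical helper name) mirroring how the script above
# recovers the starting epoch from a checkpoint directory named "epoch_<n>":
def _starting_epoch_from_checkpoint(path: str) -> int:
    epoch_string = path.split("epoch_")[1]
    digits = ""
    for char in epoch_string:
        if not char.isdigit():
            break
        digits += char
    return int(digits) + 1


assert _starting_epoch_from_checkpoint("outputs/epoch_3") == 4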
"""simple docstring""" def lowercase__ ( snake_case_ :float , snake_case_ :float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Dict = { 'configuration_clap': [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapAudioConfig', 'ClapConfig', 'ClapTextConfig', ], 'processing_clap': ['ClapProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection', 'ClapAudioModel', 'ClapAudioModelWithProjection', ] _lowercase : Union[str, Any] = ['ClapFeatureExtractor'] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowercase__ ( snake_case_ :dict ): __UpperCAmelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __UpperCAmelCase = set() return any( node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for node in graph ) def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ): visited.add(snake_case_ ) rec_stk.add(snake_case_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(snake_case_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" def lowercase__ ( snake_case_ :list ): __UpperCAmelCase = 0 while len(snake_case_ ) > 1: __UpperCAmelCase = 0 # Consider two files with minimum cost to be merged for _ in range(2 ): __UpperCAmelCase = files.index(min(snake_case_ ) ) temp += files[min_index] files.pop(snake_case_ ) files.append(snake_case_ ) optimal_merge_cost += temp return optimal_merge_cost if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ['PoolFormerFeatureExtractor'] _lowercase : Any = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: _lowercase : List[str] = None _lowercase : Dict = logging.get_logger(__name__) _lowercase : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} _lowercase : Tuple = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } _lowercase : Any = { 'albert-base-v1': 5_12, 'albert-large-v1': 5_12, 'albert-xlarge-v1': 5_12, 'albert-xxlarge-v1': 5_12, 'albert-base-v2': 5_12, 'albert-large-v2': 5_12, 'albert-xlarge-v2': 5_12, 'albert-xxlarge-v2': 5_12, } _lowercase : Union[str, Any] = '▁' class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Optional[Any] = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Union[str, Any] = AlbertTokenizer def __init__( self : Dict , _lowercase : Optional[int]=None , _lowercase : List[Any]=None , _lowercase : int=True , _lowercase : Union[str, Any]=True , _lowercase : List[str]=False , _lowercase : List[str]="[CLS]" , _lowercase : int="[SEP]" , _lowercase : Optional[Any]="<unk>" , _lowercase : List[Any]="[SEP]" , _lowercase : int="<pad>" , _lowercase : Optional[int]="[CLS]" , _lowercase : List[str]="[MASK]" , **_lowercase : Optional[int] , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__UpperCAmelCase = ( AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase , normalized=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token ) super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = remove_space __UpperCAmelCase = keep_accents __UpperCAmelCase = vocab_file __UpperCAmelCase = False if not self.vocab_file else True def a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a ( self : Union[str, Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ): copyfile(self.vocab_file , _lowercase ) return (out_vocab_file,)
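# Hedged usage sketch for the fast ALBERT tokenizer defined above; in the
# released transformers API this class is AlbertTokenizerFast, and loading
# "albert-base-v2" assumes access to the Hugging Face Hub:
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoding = tokenizer("Hello world")
# [CLS] and [SEP] are added by build_inputs_with_special_tokens above
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))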
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
"""simple docstring""" import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : List[Any] = MobileBertTokenizer a__ : str = MobileBertTokenizerFast a__ : str = True a__ : Optional[Any] = True a__ : List[str] = filter_non_english a__ : Optional[Any] = "google/mobilebert-uncased" def a ( self : int ): super().setUp() __UpperCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __UpperCAmelCase = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def a ( self : Any , _lowercase : Union[str, Any] ): __UpperCAmelCase = '''UNwant\u00E9d,running''' __UpperCAmelCase = '''unwanted, running''' return input_text, output_text def a ( self : Optional[int] ): __UpperCAmelCase = self.tokenizer_class(self.vocab_file ) __UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [9, 6, 7, 12, 10, 11] ) def a ( self : Any ): if not self.test_rust_tokenizer: return __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_rust_tokenizer() __UpperCAmelCase = '''UNwant\u00E9d,running''' __UpperCAmelCase = tokenizer.tokenize(_lowercase ) __UpperCAmelCase = rust_tokenizer.tokenize(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) __UpperCAmelCase = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) __UpperCAmelCase = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) __UpperCAmelCase = self.get_rust_tokenizer() __UpperCAmelCase = tokenizer.encode(_lowercase ) __UpperCAmelCase = rust_tokenizer.encode(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) # With lower casing __UpperCAmelCase = self.get_tokenizer(do_lower_case=_lowercase ) __UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=_lowercase ) __UpperCAmelCase = '''UNwant\u00E9d,running''' __UpperCAmelCase = tokenizer.tokenize(_lowercase ) __UpperCAmelCase = rust_tokenizer.tokenize(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) __UpperCAmelCase = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) __UpperCAmelCase = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) __UpperCAmelCase = self.get_rust_tokenizer() __UpperCAmelCase = tokenizer.encode(_lowercase ) __UpperCAmelCase = rust_tokenizer.encode(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) def a ( self : Union[str, Any] ): __UpperCAmelCase = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , 
['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def a ( self : Optional[Any] ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def a ( self : int ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def a ( self : Optional[Any] ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def a ( self : str ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def a ( self : List[Any] ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def a ( self : Dict ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def a ( self : List[Any] ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = BasicTokenizer(do_lower_case=_lowercase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def a ( self : Optional[int] ): __UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __UpperCAmelCase = {} for i, token in enumerate(_lowercase ): __UpperCAmelCase = i __UpperCAmelCase = WordpieceTokenizer(vocab=_lowercase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def a ( self : str ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def a ( self : Optional[int] ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def a ( self : Dict ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def a ( self : Dict ): __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_lowercase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_lowercase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def a ( self : List[Any] ): __UpperCAmelCase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowercase ) __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowercase ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowercase ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase ) assert encoded_sentence == [1_01] + text + [1_02] assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02] def a ( self : Union[str, Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __UpperCAmelCase = tokenizer_r.encode_plus( _lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase , ) __UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(_lowercase , '''do_lower_case''' ) else False __UpperCAmelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 
24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def a ( self : str ): __UpperCAmelCase = ['''的''', '''人''', '''有'''] __UpperCAmelCase = ''''''.join(_lowercase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = True __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase ) __UpperCAmelCase = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_lowercase ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_lowercase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(_lowercase , _lowercase ) __UpperCAmelCase = False __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) __UpperCAmelCase = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase ) __UpperCAmelCase = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_lowercase ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_lowercase ) # it is expected that only the first Chinese character is not preceded by "##". __UpperCAmelCase = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowercase ) ] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(_lowercase , _lowercase )
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "EncodecFeatureExtractor" a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ): super().__init__(_lowercase , _lowercase ) __UpperCAmelCase = self.feature_extractor __UpperCAmelCase = False def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase ) def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''text''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase ) if audio is not None: __UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: __UpperCAmelCase = audio_inputs['''padding_mask'''] return inputs def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(_lowercase , padding_mask=_lowercase ) else: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ): return self.tokenizer.decode(*_lowercase , **_lowercase ) def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ): __UpperCAmelCase = to_numpy(_lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape if padding_mask is None: return list(_lowercase ) __UpperCAmelCase = to_numpy(_lowercase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase = seq_len - padding_mask.shape[-1] __UpperCAmelCase = 1 - self.feature_extractor.padding_value __UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase ) __UpperCAmelCase = audio_values.tolist() for i in range(_lowercase ): __UpperCAmelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 ) return audio_values
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. _lowercase : str = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. _lowercase : Any = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. _lowercase : int = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = len([g for position, g in enumerate(snake_case_ ) if g == main_target[position]] ) return (item, float(snake_case_ )) def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = random.randint(0 , len(snake_case_ ) - 1 ) __UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] __UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def lowercase__ ( snake_case_ :str , snake_case_ :list[str] ): __UpperCAmelCase = list(snake_case_ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __UpperCAmelCase = random.choice(snake_case_ ) return "".join(snake_case_ ) def lowercase__ ( snake_case_ :tuple[str, float] , snake_case_ :list[tuple[str, float]] , snake_case_ :list[str] , ): __UpperCAmelCase = [] # Generate more children proportionally to the fitness score. __UpperCAmelCase = int(parent_a[1] * 100 ) + 1 __UpperCAmelCase = 10 if child_n >= 10 else child_n for _ in range(snake_case_ ): __UpperCAmelCase = population_score[random.randint(0 , snake_case_ )][0] __UpperCAmelCase , __UpperCAmelCase = crossover(parent_a[0] , snake_case_ ) # Append new string to the population list. pop.append(mutate(snake_case_ , snake_case_ ) ) pop.append(mutate(snake_case_ , snake_case_ ) ) return pop def lowercase__ ( snake_case_ :str , snake_case_ :list[str] , snake_case_ :bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: __UpperCAmelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(snake_case_ ) # Verify that the target contains no genes besides the ones inside genes variable. __UpperCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __UpperCAmelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(snake_case_ ) # Generate random starting population. __UpperCAmelCase = [] for _ in range(snake_case_ ): population.append(''''''.join([random.choice(snake_case_ ) for i in range(len(snake_case_ ) )] ) ) # Just some logs to know what the algorithms is doing. __UpperCAmelCase , __UpperCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(snake_case_ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. 
# We just need to call evaluate for every item inside the population. __UpperCAmelCase = [evaluate(snake_case_ , snake_case_ ) for item in population] # Check if there is a matching evolution. __UpperCAmelCase = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population:{total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __UpperCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(snake_case_ ) # Normalize population score to be between 0 and 1. __UpperCAmelCase = [ (item, score / len(snake_case_ )) for item, score in population_score ] # This is selection for i in range(snake_case_ ): population.extend(select(population_score[int(snake_case_ )] , snake_case_ , snake_case_ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(snake_case_ ) > N_POPULATION: break if __name__ == "__main__": _lowercase : Optional[int] = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) _lowercase : Union[str, Any] = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) _lowercase ,_lowercase ,_lowercase : Optional[int] = basic(target_str, genes_list) print( f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
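# Worked example for the fitness function used above: evaluate(item, target)
# counts position-wise character matches, so for the target "abc" the
# candidate "abd" scores 2 matches and is returned as ("abd", 2.0).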
"""simple docstring""" def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __UpperCAmelCase = True for i in range(snake_case_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __UpperCAmelCase = True if a[i].islower(): __UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def lowercase__ ( snake_case_ :list[int] , snake_case_ :str ): __UpperCAmelCase = int(snake_case_ ) # Initialize Result __UpperCAmelCase = [] # Traverse through all denomination for denomination in reversed(snake_case_ ): # Find denominations while int(snake_case_ ) >= int(snake_case_ ): total_value -= int(snake_case_ ) answer.append(snake_case_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": _lowercase : str = [] _lowercase : Union[str, Any] = '0' if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): _lowercase : List[Any] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(f"""Denomination {i}: """).strip())) _lowercase : Optional[Any] = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter _lowercase : List[Any] = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] _lowercase : List[str] = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(f"""Following is minimal change for {value}: """) _lowercase : Optional[int] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
"""simple docstring""" from collections import deque class _UpperCAmelCase : def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ): __UpperCAmelCase = process_name # process name __UpperCAmelCase = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __UpperCAmelCase = arrival_time __UpperCAmelCase = burst_time # remaining burst time __UpperCAmelCase = 0 # total time of the process wait in ready queue __UpperCAmelCase = 0 # time from arrival time to completion time class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ): # total number of mlfq's queues __UpperCAmelCase = number_of_queues # time slice of queues that round robin algorithm applied __UpperCAmelCase = time_slices # unfinished process is in this ready_queue __UpperCAmelCase = queue # current time __UpperCAmelCase = current_time # finished process is in this sequence queue __UpperCAmelCase = deque() def a ( self : Dict ): __UpperCAmelCase = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a ( self : str , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a ( self : Any , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a ( self : Tuple , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a ( self : Optional[int] , _lowercase : deque[Process] ): return [q.burst_time for q in queue] def a ( self : str , _lowercase : Process ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a ( self : Union[str, Any] , _lowercase : deque[Process] ): __UpperCAmelCase = deque() # sequence deque of finished process while len(_lowercase ) != 0: __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_lowercase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __UpperCAmelCase = 0 # set the process's turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # set the completion time __UpperCAmelCase = self.current_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ): __UpperCAmelCase = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_lowercase ) ): __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_lowercase ) # if the burst time of process is bigger than 
time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __UpperCAmelCase = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_lowercase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __UpperCAmelCase = 0 # set the finish time __UpperCAmelCase = self.current_time # update the process' turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a ( self : Union[str, Any] ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __UpperCAmelCase , __UpperCAmelCase = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _lowercase : List[str] = Process('P1', 0, 53) _lowercase : str = Process('P2', 0, 17) _lowercase : Union[str, Any] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : Any = 3 _lowercase : Union[str, Any] = [17, 25] _lowercase : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) _lowercase : Optional[Any] = Process('P1', 0, 53) _lowercase : Tuple = Process('P2', 0, 17) _lowercase : Optional[int] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : int = 3 _lowercase : int = [17, 25] _lowercase : List[str] = deque([Pa, Pa, Pa, Pa]) _lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0) _lowercase : str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( f"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( f"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : List[Any] = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "camembert" def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = use_cache __UpperCAmelCase = classifier_dropout class _UpperCAmelCase ( _lowerCAmelCase ): @property def a ( self : Tuple ): if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _UpperCAmelCase ( unittest.TestCase ): def __init__( self : Tuple , _lowercase : Tuple , _lowercase : List[Any]=7 , _lowercase : List[str]=3 , _lowercase : List[Any]=30 , _lowercase : Union[str, Any]=4_00 , _lowercase : Optional[int]=True , _lowercase : Any=None , _lowercase : Optional[Any]=0.9 , _lowercase : Optional[int]=None , _lowercase : Optional[int]=True , _lowercase : Tuple=[0.5, 0.5, 0.5] , _lowercase : Any=[0.5, 0.5, 0.5] , ): __UpperCAmelCase = size if size is not None else {'''shortest_edge''': 30} __UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30} __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = num_channels __UpperCAmelCase = min_resolution __UpperCAmelCase = max_resolution __UpperCAmelCase = do_resize_and_center_crop __UpperCAmelCase = size __UpperCAmelCase = crop_pct __UpperCAmelCase = crop_size __UpperCAmelCase = do_normalize __UpperCAmelCase = image_mean __UpperCAmelCase = image_std def a ( self : int ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : Union[str, Any] = PoolFormerImageProcessor if is_vision_available() else None def a ( self : List[Any] ): __UpperCAmelCase = PoolFormerImageProcessingTester(self ) @property def a ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def a ( self : Any ): __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowercase , '''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(_lowercase , '''size''' ) ) self.assertTrue(hasattr(_lowercase , '''crop_pct''' ) ) self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowercase , '''image_std''' ) ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 30} ) self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} ) __UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def a ( self : List[str] ): pass def a ( self : Dict ): # Initialize image_processing __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , Image.Image ) # Test not batched input __UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a ( self : Union[str, Any] ): # Initialize image_processing __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , np.ndarray ) # Test not batched input __UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def a ( self : Union[str, Any] ): # Initialize image_processing __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase ) for image in image_inputs: self.assertIsInstance(_lowercase , torch.Tensor ) # Test not batched input __UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks if the entire collection has been sorted if len(snake_case_ ) <= 1 or n <= 1: return insert_next(snake_case_ , n - 1 ) rec_insertion_sort(snake_case_ , n - 1 ) def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks order between adjacent elements if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase = ( collection[index], collection[index - 1], ) insert_next(snake_case_ , index + 1 ) if __name__ == "__main__": _lowercase : Any = input('Enter integers separated by spaces: ') _lowercase : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowercase : int = logging.get_logger(__name__) _lowercase : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} _lowercase : List[str] = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } _lowercase : Optional[int] = { 'gpt-neox-20b': 20_48, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : List[str] = VOCAB_FILES_NAMES a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = ["input_ids", "attention_mask"] def __init__( self : Tuple , _lowercase : Optional[int]=None , _lowercase : Optional[Any]=None , _lowercase : str=None , _lowercase : List[Any]="<|endoftext|>" , _lowercase : List[Any]="<|endoftext|>" , _lowercase : Optional[int]="<|endoftext|>" , _lowercase : List[Any]=False , **_lowercase : Optional[int] , ): super().__init__( _lowercase , _lowercase , tokenizer_file=_lowercase , unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _lowercase ) != add_prefix_space: __UpperCAmelCase = getattr(_lowercase , pre_tok_state.pop('''type''' ) ) __UpperCAmelCase = add_prefix_space __UpperCAmelCase = pre_tok_class(**_lowercase ) __UpperCAmelCase = add_prefix_space def a ( self : int , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase ) def a ( self : Any , _lowercase : "Conversation" ): __UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_lowercase , add_special_tokens=_lowercase ) + [self.eos_token_id] ) if len(_lowercase ) > self.model_max_length: __UpperCAmelCase = input_ids[-self.model_max_length :] return input_ids
49
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : Any = StableUnCLIPPipeline a__ : Dict = TEXT_TO_IMAGE_PARAMS a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false a__ : Optional[int] = False def a ( self : List[str] ): __UpperCAmelCase = 32 __UpperCAmelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = DDPMScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , ) # regular denoising components torch.manual_seed(0 ) __UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase ) __UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL() __UpperCAmelCase = { # prior 
components '''prior_tokenizer''': prior_tokenizer, '''prior_text_encoder''': prior_text_encoder, '''prior''': prior, '''prior_scheduler''': prior_scheduler, # image noising components '''image_normalizer''': image_normalizer, '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder, '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, } return components def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ): if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''prior_num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def a ( self : Any ): __UpperCAmelCase = torch_device == '''cpu''' self._test_attention_slicing_forward_pass(test_max_difference=_lowercase ) def a ( self : int ): __UpperCAmelCase = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowercase ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Any ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' ) __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' ) __UpperCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase ) def a ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __UpperCAmelCase = pipe( '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
49
1
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowercase : Tuple = logging.get_logger(__name__) _lowercase : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } _lowercase : Tuple = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowercase__ ( snake_case_ :Tuple , snake_case_ :Dict , snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Dict ): for attribute in key.split('''.''' ): __UpperCAmelCase = getattr(snake_case_ , snake_case_ ) if weight_type is not None: __UpperCAmelCase = getattr(snake_case_ , snake_case_ ).shape else: __UpperCAmelCase = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": __UpperCAmelCase = value elif weight_type == "weight_g": __UpperCAmelCase = value elif weight_type == "weight_v": __UpperCAmelCase = value elif weight_type == "bias": __UpperCAmelCase = value else: __UpperCAmelCase = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase__ ( snake_case_ :Dict , snake_case_ :Dict ): __UpperCAmelCase = [] __UpperCAmelCase = fairseq_model.state_dict() __UpperCAmelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __UpperCAmelCase = None for name, value in fairseq_dict.items(): __UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == '''group''' , ) __UpperCAmelCase = True elif name.split('''.''' )[0] == "proj": __UpperCAmelCase = fairseq_model.proj __UpperCAmelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __UpperCAmelCase = True if "*" in mapped_key: __UpperCAmelCase = name.split(snake_case_ )[0].split('''.''' )[-2] __UpperCAmelCase = mapped_key.replace('''*''' , snake_case_ ) if "weight_g" in name: __UpperCAmelCase = '''weight_g''' elif "weight_v" in name: __UpperCAmelCase = '''weight_v''' elif "bias" in name: __UpperCAmelCase = '''bias''' elif "weight" in name: __UpperCAmelCase = '''weight''' else: __UpperCAmelCase = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) return proj_weight def lowercase__ ( snake_case_ :str , snake_case_ :List[Any] , snake_case_ :str , snake_case_ :List[Any] , snake_case_ :Dict ): __UpperCAmelCase = full_name.split('''conv_layers.''' )[-1] __UpperCAmelCase = name.split('''.''' ) __UpperCAmelCase = int(items[0] ) __UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __UpperCAmelCase = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __UpperCAmelCase = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __UpperCAmelCase = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __UpperCAmelCase = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case_ ) def lowercase__ ( snake_case_ :Any ): __UpperCAmelCase , __UpperCAmelCase = emb.weight.shape __UpperCAmelCase = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ ) __UpperCAmelCase = emb.weight.data return lin_layer def lowercase__ ( snake_case_ :str ): with open(snake_case_ , '''r''' , encoding='''utf-8''' ) as f: __UpperCAmelCase = f.readlines() __UpperCAmelCase = [line.split(''' ''' )[0] for line in lines] __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(snake_case_ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Optional[int] , snake_case_ :Tuple , snake_case_ :Optional[int] , snake_case_ :Optional[Any] , snake_case_ :List[Any] , ): __UpperCAmelCase = WavaVecaConfig.from_pretrained(snake_case_ ) __UpperCAmelCase = SpeechaTextaConfig.from_pretrained( snake_case_ , vocab_size=snake_case_ , decoder_layers=snake_case_ , do_stable_layer_norm=snake_case_ ) __UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __UpperCAmelCase = model[0].eval() # set weights for wav2vec2 encoder __UpperCAmelCase = WavaVecaModel(snake_case_ ) __UpperCAmelCase = recursively_load_weights_wavaveca(model.encoder , snake_case_ ) __UpperCAmelCase = SpeechaTextaForCausalLM(snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case_ ) # set output linear layer unexpected_keys.remove('''embed_out''' ) __UpperCAmelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) __UpperCAmelCase = SpeechEncoderDecoderModel(encoder=snake_case_ , decoder=snake_case_ ) __UpperCAmelCase = False # add projection layer __UpperCAmelCase = nn.Parameter(projection_layer.weight ) __UpperCAmelCase = nn.Parameter(projection_layer.bias ) __UpperCAmelCase = create_vocab_dict(snake_case_ ) with open(os.path.join(snake_case_ , '''vocab.json''' ) , '''w''' ) as fp: json.dump(snake_case_ , snake_case_ ) __UpperCAmelCase = SpeechaTextaTokenizer(os.path.join(snake_case_ , '''vocab.json''' ) ) tokenizer.save_pretrained(snake_case_ ) __UpperCAmelCase = hf_wavavec.config.to_dict() __UpperCAmelCase = tokenizer.pad_token_id __UpperCAmelCase = tokenizer.bos_token_id __UpperCAmelCase = tokenizer.eos_token_id __UpperCAmelCase = '''speech_to_text_2''' 
__UpperCAmelCase = '''wav2vec2''' __UpperCAmelCase = SpeechEncoderDecoderConfig.from_dict(snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) feature_extractor.save_pretrained(snake_case_ ) if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') _lowercase : Dict = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
49
"""simple docstring""" from typing import Any def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ): _validation( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Creates data structures and fill initial step __UpperCAmelCase = {} __UpperCAmelCase = {} for state in states_space: __UpperCAmelCase = observations_space[0] __UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case_ ) ): __UpperCAmelCase = observations_space[o] __UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state # Update probabilities and pointers dicts __UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __UpperCAmelCase = arg_max # The final observation __UpperCAmelCase = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state __UpperCAmelCase = arg_max # Process pointers backwards __UpperCAmelCase = last_state __UpperCAmelCase = [] for o in range(len(snake_case_ ) - 1 , -1 , -1 ): result.append(snake_case_ ) __UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_not_empty( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) _validate_lists(snake_case_ , snake_case_ ) _validate_dicts( snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any ): _validate_list(snake_case_ , '''observations_space''' ) _validate_list(snake_case_ , '''states_space''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list''' raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list of strings''' raise ValueError(snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ ) _validate_nested_dict(snake_case_ , '''transition_probabilities''' ) _validate_nested_dict(snake_case_ , '''emission_probabilities''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): _validate_dict(_object , snake_case_ , snake_case_ ) for x in 
_object.values(): _validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a dict''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ): __UpperCAmelCase = F'''{var_name} all keys must be strings''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ): __UpperCAmelCase = '''nested dictionary ''' if nested else '''''' __UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
49
1
"""simple docstring""" def lowercase__ ( snake_case_ :str ): assert column_title.isupper() __UpperCAmelCase = 0 __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = 0 while index >= 0: __UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , snake_case_ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
49
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowercase : int = logging.get_logger(__name__) _lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _lowercase : str = { 'vocab_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json' ), }, } _lowercase : int = { 'yjernite/retribert-base-uncased': 5_12, } _lowercase : Any = { 'yjernite/retribert-base-uncased': {'do_lower_case': True}, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = PRETRAINED_INIT_CONFIGURATION a__ : Optional[Any] = RetriBertTokenizer a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ): super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars ): __UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = strip_accents __UpperCAmelCase = tokenize_chinese_chars __UpperCAmelCase = normalizer_class(**_lowercase ) __UpperCAmelCase = do_lower_case def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ): __UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase )
49
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Optional[Any] ): __UpperCAmelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __UpperCAmelCase = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(_lowercase ) , torch_builtin(_lowercase ) ) ) self.assertFalse(torch.allclose(gelu_python(_lowercase ) , gelu_new(_lowercase ) ) ) def a ( self : Any ): __UpperCAmelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __UpperCAmelCase = get_activation('''gelu''' ) __UpperCAmelCase = get_activation('''gelu_10''' ) __UpperCAmelCase = torch_builtin(_lowercase ) __UpperCAmelCase = geluaa(_lowercase ) __UpperCAmelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(_lowercase ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def a ( self : int ): get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(_lowercase ): get_activation('''bogus''' ) with self.assertRaises(_lowercase ): get_activation(_lowercase ) def a ( self : List[Any] ): __UpperCAmelCase = get_activation('''gelu''' ) __UpperCAmelCase = 1 __UpperCAmelCase = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(_lowercase ): __UpperCAmelCase = acta.a
49
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _lowercase : Dict = 'bart' _lowercase : Dict = True @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __UpperCAmelCase = qar_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = (None, None) if MODEL_TYPE == "bart": __UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __UpperCAmelCase = sas_model.eval() else: __UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): if LOAD_DENSE_INDEX: __UpperCAmelCase = faiss.StandardGpuResources() __UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __UpperCAmelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) __UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ ) wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU else: __UpperCAmelCase , __UpperCAmelCase = (None, None) __UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=snake_case_ ) def lowercase__ ( ): __UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __UpperCAmelCase = elia['''train_eli5'''] __UpperCAmelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __UpperCAmelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(snake_case_ ) return (elia_train, eli5_train_q_index) _lowercase ,_lowercase ,_lowercase : Dict = load_indexes() _lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models() _lowercase ,_lowercase : Tuple = load_train_data() def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ): __UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ ) __UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]] return nn_examples def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ): if source == "none": __UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index( snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ ) else: __UpperCAmelCase , __UpperCAmelCase = query_es_index( snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , ) __UpperCAmelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda snake_case_ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None), } ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ): with torch.no_grad(): __UpperCAmelCase = qa_sas_generate( snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar _lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' _lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) _lowercase : str = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] _lowercase : Optional[int] = st.sidebar.checkbox('Demo options') if demo_options: _lowercase : Tuple = st.sidebar.selectbox( '', action_list, index=3, ) _lowercase : List[str] = action_list.index(action_st) _lowercase : str = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) _lowercase : int = show_type == 'Show full text of passages' else: _lowercase : str = 3 _lowercase : List[Any] = True _lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options') if retrieval_options: _lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) _lowercase : 
Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: _lowercase : List[str] = 'wiki40b' _lowercase : Optional[int] = 'dense' _lowercase : List[Any] = 'beam' _lowercase : str = 2 _lowercase : Optional[int] = 64 _lowercase : Union[str, Any] = 2_56 _lowercase : List[str] = None _lowercase : Optional[int] = None _lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options') if generate_options: _lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) _lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) _lowercase : Optional[int] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) _lowercase : Optional[Any] = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": _lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _lowercase : List[Any] = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) _lowercase : Dict = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) _lowercase : Union[str, Any] = None # start main text _lowercase : Optional[int] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] _lowercase : Optional[int] = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": _lowercase : Optional[Any] = st.text_input('Enter your question here:', '') else: _lowercase : int = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": _lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10) _lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10) _lowercase : Dict = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _lowercase : Any = support_list[:10] _lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: _lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: _lowercase ,_lowercase : Union[str, Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): _lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) _lowercase : Any = res[1].strip() if sec_titles == "": _lowercase : Dict = '[{}]({})'.format(res[0], wiki_url) else: _lowercase : List[Any] = sec_titles.split(' & ') _lowercase : int = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: _lowercase : List[Any] = find_nearest_training(question) _lowercase : Tuple = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) _lowercase : int = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) _lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
49
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase _lowercase : List[str] = logging.get_logger(__name__) _lowercase : Tuple = { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json', 'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json', 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "longformer" def __init__( self : List[str] , _lowercase : Union[List[int], int] = 5_12 , _lowercase : int = 2 , _lowercase : int = 1 , _lowercase : int = 0 , _lowercase : int = 2 , _lowercase : int = 3_05_22 , _lowercase : int = 7_68 , _lowercase : int = 12 , _lowercase : int = 12 , _lowercase : int = 30_72 , _lowercase : str = "gelu" , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : int = 5_12 , _lowercase : int = 2 , _lowercase : float = 0.02 , _lowercase : float = 1E-12 , _lowercase : bool = False , **_lowercase : List[str] , ): super().__init__(pad_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = attention_window __UpperCAmelCase = sep_token_id __UpperCAmelCase = bos_token_id __UpperCAmelCase = eos_token_id __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = onnx_export class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : List[str] , _lowercase : "PretrainedConfig" , _lowercase : str = "default" , _lowercase : "List[PatchingSpec]" = None ): super().__init__(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = True @property def a ( self : Dict ): if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''global_attention_mask''', dynamic_axis), ] ) @property def a ( self : int ): __UpperCAmelCase = super().outputs if self.task == "default": __UpperCAmelCase = {0: '''batch'''} return outputs @property def a ( self : Any ): return 1E-4 @property def a ( self : Optional[Any] ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def a ( self : List[str] , _lowercase : "PreTrainedTokenizerBase" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ): 
__UpperCAmelCase = super().generate_dummy_inputs( preprocessor=_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly __UpperCAmelCase = torch.zeros_like(inputs['''input_ids'''] ) # make every second token global __UpperCAmelCase = 1 return inputs
49
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[str] = CycleDiffusionPipeline a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"} a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def a ( self : Optional[int] ): torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __UpperCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) __UpperCAmelCase = CLIPTextModel(_lowercase ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ): __UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image / 2 + 0.5 if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def a ( self : Optional[int] ): __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = 
CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a ( self : Optional[int] ): __UpperCAmelCase = self.get_dummy_components() for name, module in components.items(): if hasattr(_lowercase , '''half''' ): __UpperCAmelCase = module.half() __UpperCAmelCase = CycleDiffusionPipeline(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = self.get_dummy_inputs(_lowercase ) __UpperCAmelCase = pipe(**_lowercase ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def a ( self : Tuple ): return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def a ( self : List[str] ): return super().test_inference_batch_single_identical() @skip_mps def a ( self : int ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def a ( self : str ): return super().test_save_load_optional_components() @skip_mps def a ( self : int ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : int ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained( _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def a ( self : Optional[Any] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) __UpperCAmelCase = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = '''CompVis/stable-diffusion-v1-4''' __UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' ) __UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() __UpperCAmelCase = '''A black colored car''' __UpperCAmelCase = '''A blue colored car''' __UpperCAmelCase = torch.manual_seed(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images assert np.abs(image - expected_image ).max() < 2E-2
49
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Tuple = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys _lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'} _lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _lowercase : List[str] = { 'google/rembert': 2_56, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Union[str, Any] = VOCAB_FILES_NAMES a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ): super().__init__( do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = remove_space __UpperCAmelCase = keep_accents __UpperCAmelCase = vocab_file __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(_lowercase ) @property def a ( self : int ): return len(self.sp_model ) def a ( self : Tuple ): __UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): __UpperCAmelCase = self.__dict__.copy() __UpperCAmelCase = None return state def __setstate__( self : Tuple , _lowercase : str ): __UpperCAmelCase = d __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ): __UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase ) return pieces def a ( self : int , _lowercase : List[str] ): return self.sp_model.PieceToId(_lowercase ) def a ( self : List[str] , _lowercase : str ): return self.sp_model.IdToPiece(_lowercase ) def a ( self : Any , _lowercase : Dict ): __UpperCAmelCase = self.sp_model.decode_pieces(_lowercase ) return out_string def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1] def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] 
__UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not os.path.isdir(_lowercase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) ) return __UpperCAmelCase = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ): copyfile(self.vocab_file , _lowercase ) return (out_vocab_file,)
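The sequence-pair layout built by the two methods above can be sanity-checked in isolation; a minimal sketch with made-up token ids (101/102 stand in for [CLS]/[SEP], not RemBERT's real vocabulary):

# illustrative sketch of the [CLS] A [SEP] B [SEP] layout; the ids are placeholders
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]
input_ids = cls + token_ids_a + sep + token_ids_b + sep
token_type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
assert len(input_ids) == len(token_type_ids)
print(input_ids, token_type_ids)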
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _UpperCAmelCase : def __init__( self : Tuple ): __UpperCAmelCase = '''''' __UpperCAmelCase = '''''' __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = 2_56 __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = 0 def a ( self : List[Any] , _lowercase : List[Any] ): __UpperCAmelCase = cva.imread(_lowercase , 0 ) __UpperCAmelCase = copy.deepcopy(self.img ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='''x''' ) __UpperCAmelCase = np.sum(_lowercase ) for i in range(len(_lowercase ) ): __UpperCAmelCase = x[i] / self.k self.sk += prk __UpperCAmelCase = (self.L - 1) * self.sk if self.rem != 0: __UpperCAmelCase = int(last % last ) __UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_lowercase ) __UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) __UpperCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): __UpperCAmelCase = self.img[j][i] if num != self.last_list[num]: __UpperCAmelCase = self.last_list[num] cva.imwrite('''output_data/output.jpg''' , self.img ) def a ( self : Tuple ): plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def a ( self : Union[str, Any] ): cva.imshow('''Output-Image''' , self.img ) cva.imshow('''Input-Image''' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": _lowercase : Optional[int] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg') _lowercase : Union[str, Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
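The pixel remapping above is CDF-based histogram equalisation; a numpy-only sketch of the same mapping, with random data standing in for the image file:

import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # stand-in image
hist = np.bincount(img.ravel(), minlength=256)
cdf = hist.cumsum() / hist.sum()             # cumulative distribution, sk in the code above
lut = np.round(255 * cdf).astype(np.uint8)   # (L - 1) * sk, rounded to the nearest level
equalized = lut[img]                         # remap every pixel through the lookup table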
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : List[Any] = { 'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = ['VivitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] = [ 'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'VivitModel', 'VivitPreTrainedModel', 'VivitForVideoClassification', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
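Each try/except block above gates one group of exports on an optional dependency; a reduced, self-contained sketch of that guard (the module and class names are illustrative):

_import_structure = {"configuration_vivit": ["VivitConfig"]}

try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    _import_structure["modeling_vivit"] = ["VivitModel"]  # torch-only exports
print(_import_structure)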
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Optional[Any] = "bert-generation" def __init__( self : Any , _lowercase : List[Any]=5_03_58 , _lowercase : str=10_24 , _lowercase : str=24 , _lowercase : int=16 , _lowercase : Any=40_96 , _lowercase : Union[str, Any]="gelu" , _lowercase : str=0.1 , _lowercase : Union[str, Any]=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : str=0.02 , _lowercase : Dict=1E-12 , _lowercase : int=0 , _lowercase : Optional[int]=2 , _lowercase : Dict=1 , _lowercase : Dict="absolute" , _lowercase : Tuple=True , **_lowercase : str , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = use_cache
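A configuration class of this shape is essentially defaults plus keyword overrides; a plain-dataclass sketch of the same pattern, with a hypothetical subset of the fields:

from dataclasses import dataclass

@dataclass
class TinyGenerationConfig:
    vocab_size: int = 50358
    hidden_size: int = 1024
    num_hidden_layers: int = 24
    layer_norm_eps: float = 1e-12

cfg = TinyGenerationConfig(hidden_size=512)  # override one default, keep the rest
print(cfg)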
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _lowercase : List[Any] = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowercase__ ( snake_case_ :Union[str, Any] ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowercase__ ( snake_case_ :int , snake_case_ :Dict ): if args.student_type == "roberta": __UpperCAmelCase = False elif args.student_type == "gpt2": __UpperCAmelCase = False def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ): if args.student_type == "roberta": __UpperCAmelCase = False def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax 
temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' ) __UpperCAmelCase = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ''' itUse `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __UpperCAmelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ ) __UpperCAmelCase = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) __UpperCAmelCase = special_tok_ids __UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with 
open(args.token_counts , '''rb''' ) as fp: __UpperCAmelCase = pickle.load(snake_case_ ) __UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __UpperCAmelCase = 0.0 # do not predict special tokens __UpperCAmelCase = torch.from_numpy(snake_case_ ) else: __UpperCAmelCase = None __UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) __UpperCAmelCase = student_config_class.from_pretrained(args.student_config ) __UpperCAmelCase = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: __UpperCAmelCase = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # __UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __UpperCAmelCase = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
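The --temperature and --alpha_ce flags above control a temperature-scaled soft-target loss; a minimal torch sketch of that term, following the standard knowledge-distillation formulation rather than the repo's Distiller class:

import torch
import torch.nn.functional as F

def soft_target_loss(student_logits, teacher_logits, temperature=2.0):
    # KL between the softened distributions, scaled by T**2 to keep gradient magnitudes stable
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * temperature**2

loss = soft_target_loss(torch.randn(4, 100), torch.randn(4, 100))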
"""simple docstring""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('0.12.2'): raise Exception('requires fairseq >= 0.12.2') if version.parse(fairseq.__version__) > version.parse('2'): raise Exception('requires fairseq < v2') logging.set_verbosity_info() _lowercase : Tuple = logging.get_logger(__name__) _lowercase : Dict = 'Hello, World!' _lowercase : Optional[int] = 'en_XX' def lowercase__ ( snake_case_ :str , snake_case_ :str , snake_case_ :bool ): __UpperCAmelCase = Path('''data_bin''' ) __UpperCAmelCase = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case_ ).parent ) , checkpoint_file=Path(snake_case_ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(snake_case_ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(snake_case_ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , ) xmod.eval() # disable dropout print(snake_case_ ) __UpperCAmelCase = xmod.model.encoder.sentence_encoder __UpperCAmelCase = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our X-MOD config:''' , snake_case_ ) __UpperCAmelCase = XmodForSequenceClassification(snake_case_ ) if classification_head else XmodForMaskedLM(snake_case_ ) model.eval() # Now let's copy all the weights. # Embeddings __UpperCAmelCase = xmod_sent_encoder.embed_tokens.weight __UpperCAmelCase = xmod_sent_encoder.embed_positions.weight __UpperCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. 
__UpperCAmelCase = xmod_sent_encoder.layernorm_embedding.weight __UpperCAmelCase = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __UpperCAmelCase = model.roberta.encoder.layer[i] __UpperCAmelCase = xmod_sent_encoder.layers[i] # self attention __UpperCAmelCase = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError('''Dimensions of self-attention weights do not match.''' ) __UpperCAmelCase = xmod_layer.self_attn.q_proj.weight __UpperCAmelCase = xmod_layer.self_attn.q_proj.bias __UpperCAmelCase = xmod_layer.self_attn.k_proj.weight __UpperCAmelCase = xmod_layer.self_attn.k_proj.bias __UpperCAmelCase = xmod_layer.self_attn.v_proj.weight __UpperCAmelCase = xmod_layer.self_attn.v_proj.bias # self-attention output __UpperCAmelCase = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('''Dimensions of self-attention output weights do not match.''' ) __UpperCAmelCase = xmod_layer.self_attn.out_proj.weight __UpperCAmelCase = xmod_layer.self_attn.out_proj.bias __UpperCAmelCase = xmod_layer.self_attn_layer_norm.weight __UpperCAmelCase = xmod_layer.self_attn_layer_norm.bias # intermediate __UpperCAmelCase = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of intermediate weights do not match.''' ) __UpperCAmelCase = xmod_layer.fca.weight __UpperCAmelCase = xmod_layer.fca.bias # output __UpperCAmelCase = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of feed-forward weights do not match.''' ) __UpperCAmelCase = xmod_layer.fca.weight __UpperCAmelCase = xmod_layer.fca.bias __UpperCAmelCase = xmod_layer.final_layer_norm.weight __UpperCAmelCase = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __UpperCAmelCase = xmod_layer.adapter_layer_norm.weight __UpperCAmelCase = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError('''Lists of language adapters do not match.''' ) for lang_code, adapter in xmod_layer.adapter_modules.items(): __UpperCAmelCase = bert_output.adapter_modules[lang_code] __UpperCAmelCase = xmod_layer.adapter_modules[lang_code] __UpperCAmelCase = from_adapter.fca.weight __UpperCAmelCase = from_adapter.fca.bias __UpperCAmelCase = from_adapter.fca.weight __UpperCAmelCase = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __UpperCAmelCase = xmod_sent_encoder.layer_norm.weight __UpperCAmelCase = xmod_sent_encoder.layer_norm.bias if classification_head: __UpperCAmelCase = xmod.model.classification_heads['''mnli'''].dense.weight __UpperCAmelCase = xmod.model.classification_heads['''mnli'''].dense.bias __UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight __UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head __UpperCAmelCase = xmod.model.encoder.lm_head.dense.weight __UpperCAmelCase = xmod.model.encoder.lm_head.dense.bias __UpperCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight __UpperCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias __UpperCAmelCase = xmod.model.encoder.lm_head.weight 
__UpperCAmelCase = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. __UpperCAmelCase = xmod.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case_ ) __UpperCAmelCase = model(snake_case_ )[0] if classification_head: __UpperCAmelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(snake_case_ ) ) else: __UpperCAmelCase = xmod.model(snake_case_ , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __UpperCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 __UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) if __name__ == "__main__": _lowercase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) _lowercase : Dict = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
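Conversion scripts like this one end with the same numeric check: run both models on identical input and compare outputs element-wise; a self-contained sketch of that check with random stand-in tensors:

import torch

our_output = torch.randn(1, 8, 16)
their_output = our_output + 1e-5 * torch.randn_like(our_output)  # stand-in for the reference model
max_abs_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_abs_diff}")
assert torch.allclose(our_output, their_output, atol=1e-3)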
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Any = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowercase__ ( snake_case_ :Dataset , snake_case_ :Dict[str, str] ): __UpperCAmelCase = args.log_outputs __UpperCAmelCase = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric __UpperCAmelCase = load_metric('''wer''' ) __UpperCAmelCase = load_metric('''cer''' ) # compute metrics __UpperCAmelCase = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) __UpperCAmelCase = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results __UpperCAmelCase = F'''WER: {wer_result}\nCER: {cer_result}''' print(snake_case_ ) with open(F'''{dataset_id}_eval_results.txt''' , '''w''' ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __UpperCAmelCase = F'''log_{dataset_id}_predictions.txt''' __UpperCAmelCase = F'''log_{dataset_id}_targets.txt''' with open(snake_case_ , '''w''' ) as p, open(snake_case_ , '''w''' ) as t: # mapping function to write output def write_to_file(snake_case_ :Optional[int] , snake_case_ :str ): p.write(F'''{i}''' + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(F'''{i}''' + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(snake_case_ , with_indices=snake_case_ ) def lowercase__ ( snake_case_ :str ): __UpperCAmelCase = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __UpperCAmelCase = re.sub(snake_case_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! __UpperCAmelCase = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: __UpperCAmelCase = ''' '''.join(text.split(snake_case_ ) ) return text def lowercase__ ( snake_case_ :Dict ): # load dataset __UpperCAmelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __UpperCAmelCase = AutoFeatureExtractor.from_pretrained(args.model_id ) __UpperCAmelCase = feature_extractor.sampling_rate # resample audio __UpperCAmelCase = dataset.cast_column('''audio''' , Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: __UpperCAmelCase = 0 if torch.cuda.is_available() else -1 __UpperCAmelCase = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case_ :Dict ): __UpperCAmelCase = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __UpperCAmelCase = prediction['''text'''] __UpperCAmelCase = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples __UpperCAmelCase = dataset.map(snake_case_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_ , snake_case_ ) if __name__ == "__main__": _lowercase : str = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. 
Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) _lowercase : List[Any] = parser.parse_args() main(args)
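The WER metric loaded above is word-level edit distance divided by reference length; a dependency-free sketch:

def word_error_rate(reference: str, hypothesis: str) -> float:
    r, h = reference.split(), hypothesis.split()
    # classic edit-distance dynamic program over words
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[-1][-1] / len(r)

assert word_error_rate("the cat sat", "the cat sat") == 0.0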
"""simple docstring""" import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _lowercase : Union[str, Any] = logging.getLogger(__name__) _lowercase : Optional[Any] = 'Hello world! cécé herlolip' _lowercase : str = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def lowercase__ ( snake_case_ :Any , snake_case_ :int ): __UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , ) __UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage ) __UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ ) original.eval() __UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) __UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs __UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) __UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) ) __UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __UpperCAmelCase = encoder_input_ids __UpperCAmelCase = decoder_input_ids __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical __UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = original.generator(snake_case_ ) __UpperCAmelCase = new_model( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0] __UpperCAmelCase = new_model.generator(snake_case_ ) __UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(snake_case_ ) ) __UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) _lowercase : List[str] = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[Any] ): __UpperCAmelCase = get_activation('''swish''' ) self.assertIsInstance(_lowercase , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def a ( self : List[str] ): __UpperCAmelCase = get_activation('''silu''' ) self.assertIsInstance(_lowercase , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def a ( self : Optional[Any] ): __UpperCAmelCase = get_activation('''mish''' ) self.assertIsInstance(_lowercase , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def a ( self : Any ): __UpperCAmelCase = get_activation('''gelu''' ) self.assertIsInstance(_lowercase , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
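These tests imply that get_activation is a name-to-module lookup; a hypothetical minimal registry of that shape (the real diffusers implementation may differ):

import torch.nn as nn

_ACTIVATIONS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}

def get_activation(name: str) -> nn.Module:
    if name not in _ACTIVATIONS:
        raise ValueError(f"Unsupported activation: {name}")
    return _ACTIVATIONS[name]()

assert isinstance(get_activation("gelu"), nn.GELU)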
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def a ( self : List[str] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self : Dict ): __UpperCAmelCase = ort.SessionOptions() __UpperCAmelCase = False return options def a ( self : Any ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a ( self : Optional[int] ): __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) __UpperCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) __UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = '''A red cat sitting on a park bench''' __UpperCAmelCase = np.random.RandomState(0 ) __UpperCAmelCase = pipe( prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , ) __UpperCAmelCase = output.images __UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) __UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-3
"""simple docstring""" import heapq import sys import numpy as np _lowercase : Optional[Any] = tuple[int, int] class _UpperCAmelCase : def __init__( self : List[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = set() def a ( self : Tuple ): if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def a ( self : List[Any] ): return len(self.elements ) == 0 def a ( self : Any , _lowercase : Optional[Any] , _lowercase : int ): if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(_lowercase ) else: # update # print("update", item) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def a ( self : Any , _lowercase : Any ): if item in self.set: self.set.remove(_lowercase ) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def a ( self : Union[str, Any] ): return self.elements[0][1] def a ( self : Tuple ): ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(_lowercase ) return (priority, item) def lowercase__ ( snake_case_ :TPos , snake_case_ :TPos ): # euclidean distance __UpperCAmelCase = np.array(snake_case_ ) __UpperCAmelCase = np.array(snake_case_ ) return np.linalg.norm(a - b ) def lowercase__ ( snake_case_ :TPos , snake_case_ :TPos ): # integer division by time variable return consistent_heuristic(snake_case_ , snake_case_ ) // t def lowercase__ ( snake_case_ :TPos , snake_case_ :TPos ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowercase__ ( snake_case_ :TPos , snake_case_ :int , snake_case_ :TPos , snake_case_ :dict[TPos, float] ): __UpperCAmelCase = g_function[start] + Wa * heuristics[i](snake_case_ , snake_case_ ) return ans def lowercase__ ( snake_case_ :Dict , snake_case_ :int , snake_case_ :List[str] ): __UpperCAmelCase = np.chararray((n, n) ) for i in range(snake_case_ ): for j in range(snake_case_ ): __UpperCAmelCase = '''*''' for i in range(snake_case_ ): for j in range(snake_case_ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: ((__UpperCAmelCase) , (__UpperCAmelCase)) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(snake_case_ ): for j in range(snake_case_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: print(snake_case_ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(snake_case_ ) sys.exit() def lowercase__ ( snake_case_ :TPos ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowercase__ ( snake_case_ :List[str] , snake_case_ :Dict , snake_case_ :str , snake_case_ :Optional[int] , snake_case_ :List[str] , snake_case_ 
:List[Any] , snake_case_ :List[Any] , snake_case_ :Union[str, Any] , ): for itera in range(snake_case_ ): open_list[itera].remove_element(snake_case_ ) # print("s", s) # print("j", j) ((__UpperCAmelCase) , (__UpperCAmelCase)) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(snake_case_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(snake_case_ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(snake_case_ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(snake_case_ , key(snake_case_ , 0 , snake_case_ , snake_case_ ) ) if neighbours not in close_list_inad: for var in range(1 , snake_case_ ): if key(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) <= Wa * key( snake_case_ , 0 , snake_case_ , snake_case_ ): open_list[j].put( snake_case_ , key(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ) def lowercase__ ( ): __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list _lowercase : Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} _lowercase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] _lowercase : int = make_common_ground() _lowercase : List[Any] = blocks_blk # hyper parameters _lowercase : Tuple = 1 _lowercase : Dict = 1 _lowercase : List[str] = 20 _lowercase : List[Any] = 3 # one consistent and two other inconsistent # start and end destination _lowercase : Dict = (0, 0) _lowercase : Dict = (n - 1, n - 1) _lowercase : Tuple = 1 def lowercase__ ( snake_case_ :TPos , snake_case_ :TPos , snake_case_ :int ): __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(snake_case_ ): open_list.append(PriorityQueue() ) open_list[i].put(snake_case_ , key(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , snake_case_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(snake_case_ , snake_case_ , snake_case_ ) else: __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show() visited.add(snake_case_ ) expand_state( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) close_list_inad.append(snake_case_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(snake_case_ , snake_case_ , snake_case_ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(snake_case_ ) expand_state( snake_case_ , 0 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
snake_case_ , ) close_list_anchor.append(snake_case_ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(snake_case_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
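The key function in this file computes the weighted-A* priority g(s) + W * h(s); for comparison with the multi-heuristic loop, a compact single-heuristic version on a 4-connected grid:

import heapq

def weighted_a_star(start, goal, blocked, n, w=1.0):
    def h(p):  # Manhattan distance, like heuristic_a in the file above
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

    g = {start: 0}
    frontier = [(w * h(start), start)]
    while frontier:
        _, s = heapq.heappop(frontier)
        if s == goal:
            return g[s]
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nb = (s[0] + dx, s[1] + dy)
            if 0 <= nb[0] < n and 0 <= nb[1] < n and nb not in blocked:
                if g[s] + 1 < g.get(nb, float("inf")):
                    g[nb] = g[s] + 1
                    heapq.heappush(frontier, (g[nb] + w * h(nb), nb))
    return float("inf")

print(weighted_a_star((0, 0), (4, 4), set(), 5))  # 8 steps on an empty 5x5 grid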
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__ ( snake_case_ :Dict , snake_case_ :int ): assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} __UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} __UpperCAmelCase = features.copy() __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} 
) if features is not None else None ) __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read() assert isinstance(snake_case_ , snake_case_ ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ): if issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = jsonl_path elif issubclass(snake_case_ , snake_case_ ): __UpperCAmelCase = [jsonl_path] __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_dataset(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ): assert isinstance(snake_case_ , snake_case_ ) for split in splits: __UpperCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ): __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = features.copy() if features else default_expected_features __UpperCAmelCase = ( Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None ) __UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read() 
_check_json_datasetdict(snake_case_ , snake_case_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ): if split: __UpperCAmelCase = {split: jsonl_path} else: __UpperCAmelCase = '''train''' __UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path} __UpperCAmelCase = tmp_path / '''cache''' __UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read() _check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowercase__ ( snake_case_ :Optional[int] ): return json.load(snake_case_ ) def lowercase__ ( snake_case_ :Any ): return [json.loads(snake_case_ ) for line in buffer] class _UpperCAmelCase : @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json_function(_lowercase ) assert isinstance(_lowercase , _lowercase ) assert isinstance(exported_content[0] , _lowercase ) assert len(_lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, 
'''data'''), ] , ) def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write() buffer.seek(0 ) __UpperCAmelCase = load_json(_lowercase ) assert isinstance(_lowercase , _lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_lowercase ) == 10 def a ( self : int , _lowercase : Any ): with pytest.raises(_lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ): __UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' __UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f: __UpperCAmelCase = f.read() assert exported_content == original_content
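The tests above exercise the `datasets` JSON reader and writer end to end. As a quick orientation, a minimal round-trip sketch using the public `datasets` API (the file name and column values below are illustrative only):

from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
ds.to_json("data.jsonl", lines=True)  # one JSON object per line, as in the lines=True tests
reloaded = load_dataset("json", data_files="data.jsonl", split="train")
assert reloaded.column_names == ["col_1", "col_2", "col_3"]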
49
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : a__ : Optional[Any] = PegasusConfig a__ : Optional[int] = {} a__ : Dict = "gelu" def __init__( self : List[Any] , _lowercase : int , _lowercase : List[Any]=13 , _lowercase : Dict=7 , _lowercase : str=True , _lowercase : str=False , _lowercase : List[Any]=99 , _lowercase : str=32 , _lowercase : Dict=2 , _lowercase : Optional[Any]=4 , _lowercase : Optional[int]=37 , _lowercase : List[Any]=0.1 , _lowercase : Any=0.1 , _lowercase : Optional[Any]=40 , _lowercase : Dict=2 , _lowercase : List[str]=1 , _lowercase : List[str]=0 , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = seq_length __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = eos_token_id __UpperCAmelCase = pad_token_id __UpperCAmelCase = bos_token_id def a ( self : Tuple ): __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase = prepare_pegasus_inputs_dict(_lowercase , _lowercase , _lowercase ) return config, inputs_dict def a ( self : Optional[int] , _lowercase : Tuple , _lowercase : str ): __UpperCAmelCase = TFPegasusModel(config=_lowercase ).get_decoder() __UpperCAmelCase = inputs_dict['''input_ids'''] __UpperCAmelCase = input_ids[:1, :] __UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :] __UpperCAmelCase = inputs_dict['''head_mask'''] __UpperCAmelCase = 1 # first forward pass __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , head_mask=_lowercase , use_cache=_lowercase ) __UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase = 
tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase )[0] __UpperCAmelCase = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] __UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-3 ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[str]=None , snake_case_ :Dict=None , snake_case_ :List[Any]=None , snake_case_ :Dict=None , snake_case_ :int=None , ): if attention_mask is None: __UpperCAmelCase = tf.cast(tf.math.not_equal(snake_case_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : Optional[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () a__ : int = (TFPegasusForConditionalGeneration,) if is_tf_available() else () a__ : List[Any] = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) a__ : Optional[Any] = True a__ : Union[str, Any] = False a__ : str = False def a ( self : Dict ): __UpperCAmelCase = TFPegasusModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=_lowercase ) def a ( self : Union[str, Any] ): self.config_tester.run_common_tests() def a ( self : Tuple ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowercase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): a__ : Tuple = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] a__ : Dict = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers a__ : List[Any] = "google/pegasus-xsum" @cached_property def a ( self : Optional[int] ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def a ( self : List[str] ): __UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def a ( self : Tuple , **_lowercase : List[str] ): __UpperCAmelCase = self.translate_src_text(**_lowercase ) assert self.expected_text == generated_words def a ( self : Optional[Any] , **_lowercase : Any ): __UpperCAmelCase = self.tokenizer(self.src_text , **_lowercase , padding=_lowercase , return_tensors='''tf''' ) __UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_lowercase , ) __UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowercase ) return generated_words @slow def a ( self : Union[str, Any] ): self._assert_generated_batch_equal_expected()
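The integration test above checks beam-search summaries from google/pegasus-xsum against fixed reference strings; a stripped-down inference sketch of the same flow (the exact output text depends on the checkpoint and library version):

from transformers import AutoTokenizer, TFPegasusForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")  # may need from_pt=True if only PyTorch weights are hosted
batch = tokenizer(["PG&E scheduled the blackouts in response to forecasts for high winds."],
                  padding=True, return_tensors="tf")
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))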
49
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Union[str, Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = tokenizer.decode(greedy_ids[0] ) __UpperCAmelCase = TextIteratorStreamer(_lowercase ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowercase , _lowercase ) def a ( self : str ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase ) __UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :] __UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase ) model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __UpperCAmelCase = cs.out[:-1] self.assertEqual(_lowercase , _lowercase ) def a ( self : Tuple ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them __UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id with CaptureStdout() as cs: __UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase ) model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __UpperCAmelCase = cs.out[:-1] # Remove the final "\n" __UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a ( self : Tuple ): __UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase ) __UpperCAmelCase = -1 __UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase ) __UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 ) __UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} __UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowercase ): __UpperCAmelCase = '''''' for new_text in streamer: streamer_text += new_text
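For reference, the non-test shape of the `TextIteratorStreamer` pattern verified above; a minimal sketch (the gpt2 checkpoint is an assumption, any causal LM works):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
for chunk in streamer:  # text arrives incrementally as tokens are generated
    print(chunk, end="", flush=True)
thread.join()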
49
1
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ): a__ : Tuple = 1 @register_to_config def __init__( self : List[Any] , _lowercase : Any=20_00 , _lowercase : Union[str, Any]=0.1 , _lowercase : Union[str, Any]=20 , _lowercase : Optional[int]=1E-3 ): __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None def a ( self : List[Any] , _lowercase : str , _lowercase : Union[str, torch.device] = None ): __UpperCAmelCase = torch.linspace(1 , self.config.sampling_eps , _lowercase , device=_lowercase ) def a ( self : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : Dict , _lowercase : Dict=None ): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score __UpperCAmelCase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) __UpperCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) __UpperCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): __UpperCAmelCase = std.unsqueeze(-1 ) __UpperCAmelCase = -score / std # compute __UpperCAmelCase = -1.0 / len(self.timesteps ) __UpperCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) __UpperCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): __UpperCAmelCase = beta_t.unsqueeze(-1 ) __UpperCAmelCase = -0.5 * beta_t * x __UpperCAmelCase = torch.sqrt(_lowercase ) __UpperCAmelCase = drift - diffusion**2 * score __UpperCAmelCase = x + drift * dt # add noise __UpperCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=_lowercase , device=x.device , dtype=x.dtype ) __UpperCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Optional[int] ): return self.config.num_train_timesteps
49
"""simple docstring""" def lowercase__ ( snake_case_ :float , snake_case_ :float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
49
1
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def lowercase__ ( snake_case_ :Any ): __UpperCAmelCase = int(snake_case_ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = t // 3_600, (t // 60) % 60, t % 60 return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}''' def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :str=300 ): # docstyle-ignore return F''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def lowercase__ ( snake_case_ :Dict ): __UpperCAmelCase = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: __UpperCAmelCase = F'''{elt:.6f}''' if isinstance(snake_case_ , snake_case_ ) else str(snake_case_ ) html_code += F''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class _UpperCAmelCase : a__ : str = 5 a__ : Any = 0.2 def __init__( self : List[Any] , _lowercase : int , _lowercase : Optional[str] = None , _lowercase : bool = True , _lowercase : Optional["NotebookTrainingTracker"] = None , _lowercase : int = 3_00 , ): __UpperCAmelCase = total __UpperCAmelCase = '''''' if prefix is None else prefix __UpperCAmelCase = leave __UpperCAmelCase = parent __UpperCAmelCase = width __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None def a ( self : List[str] , _lowercase : int , _lowercase : bool = False , _lowercase : str = None ): __UpperCAmelCase = value if comment is not None: __UpperCAmelCase = comment if self.last_value is None: __UpperCAmelCase = __UpperCAmelCase = time.time() __UpperCAmelCase = __UpperCAmelCase = value __UpperCAmelCase = __UpperCAmelCase = None __UpperCAmelCase = self.warmup __UpperCAmelCase = 1 self.update_bar(_lowercase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 __UpperCAmelCase = time.time() __UpperCAmelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: __UpperCAmelCase = self.elapsed_time / (value - self.start_value) else: __UpperCAmelCase = None if value >= self.total: __UpperCAmelCase = self.total __UpperCAmelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: __UpperCAmelCase = self.average_time_per_item * (self.total - value) self.update_bar(_lowercase ) __UpperCAmelCase = value __UpperCAmelCase = current_time if self.average_time_per_item is None: __UpperCAmelCase = 1 else: __UpperCAmelCase = max(int(self.update_every / self.average_time_per_item ) , 1 ) def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=None ): __UpperCAmelCase = ''' ''' * (len(str(self.total ) ) - len(str(_lowercase ) )) + str(_lowercase ) if self.elapsed_time is None: __UpperCAmelCase = F'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: __UpperCAmelCase = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: __UpperCAmelCase = ( F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' F''' {format_time(self.predicted_remaining )}''' ) self.label += F''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]''' self.display() def a ( self : int ): __UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: __UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=_lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def a ( self : Any ): if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Optional[Any] , _lowercase : List[str] , _lowercase : str=None ): super().__init__(_lowercase ) __UpperCAmelCase = None if column_names is None else [column_names] __UpperCAmelCase = None def a ( self : List[str] ): __UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=_lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def a ( self : str , _lowercase : int ): if self.inner_table is None: __UpperCAmelCase = [list(values.keys() ), list(values.values() )] else: __UpperCAmelCase = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(_lowercase ) __UpperCAmelCase = columns self.inner_table.append([values[c] for c in columns] ) def a ( self : List[str] , _lowercase : List[str] , _lowercase : str=None , _lowercase : int=3_00 ): __UpperCAmelCase = NotebookProgressBar(_lowercase , prefix=_lowercase , parent=self , width=_lowercase ) return self.child_bar def a ( self : Optional[int] ): __UpperCAmelCase = None self.display() class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : List[Any] ): __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = False def a ( self : Tuple , _lowercase : List[Any] , _lowercase : Any , _lowercase : Union[str, Any] , **_lowercase : str 
): __UpperCAmelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' __UpperCAmelCase = 0 __UpperCAmelCase = 0 __UpperCAmelCase = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) __UpperCAmelCase = NotebookTrainingTracker(state.max_steps , _lowercase ) def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Tuple , **_lowercase : str ): __UpperCAmelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) __UpperCAmelCase = False def a ( self : Any , _lowercase : List[Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any]=None , **_lowercase : str ): if not has_length(_lowercase ): return if self.prediction_bar is None: if self.training_tracker is not None: __UpperCAmelCase = self.training_tracker.add_child(len(_lowercase ) ) else: __UpperCAmelCase = NotebookProgressBar(len(_lowercase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def a ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Optional[int] , **_lowercase : Dict ): if self.prediction_bar is not None: self.prediction_bar.close() __UpperCAmelCase = None def a ( self : List[str] , _lowercase : int , _lowercase : Tuple , _lowercase : List[str] , _lowercase : int=None , **_lowercase : List[str] ): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: __UpperCAmelCase = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step since we're not in epoch eval strategy __UpperCAmelCase = state.global_step self.training_tracker.write_line(_lowercase ) def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : str , _lowercase : int=None , **_lowercase : Union[str, Any] ): if self.training_tracker is not None: __UpperCAmelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: __UpperCAmelCase = log['''loss'''] break if self.first_column == "Epoch": __UpperCAmelCase = int(state.epoch ) else: __UpperCAmelCase = state.global_step __UpperCAmelCase = '''eval''' for k in metrics: if k.endswith('''_loss''' ): __UpperCAmelCase = re.sub(r'''\_loss$''' , '''''' , _lowercase ) __UpperCAmelCase = metrics.pop('''total_flos''' , _lowercase ) __UpperCAmelCase = metrics.pop('''epoch''' , _lowercase ) __UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_runtime''' , _lowercase ) __UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , _lowercase ) __UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , _lowercase ) __UpperCAmelCase = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , _lowercase ) for k, v in metrics.items(): if k == F'''{metric_key_prefix}_loss''': __UpperCAmelCase = v else: __UpperCAmelCase = k.split('''_''' ) __UpperCAmelCase = ''' '''.join([part.capitalize() for part in splits[1:]] ) __UpperCAmelCase = v self.training_tracker.write_line(_lowercase ) self.training_tracker.remove_child() __UpperCAmelCase = None # Evaluation takes a long time so we should force the next update. 
__UpperCAmelCase = True def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : Dict , _lowercase : List[Any] , **_lowercase : List[str] ): self.training_tracker.update( state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=_lowercase ) __UpperCAmelCase = None
49
"""simple docstring""" def lowercase__ ( snake_case_ :dict ): __UpperCAmelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __UpperCAmelCase = set() return any( node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for node in graph ) def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ): visited.add(snake_case_ ) rec_stk.add(snake_case_ ) for node in graph[vertex]: if node not in visited: if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(snake_case_ ) return False if __name__ == "__main__": from doctest import testmod testmod()
49
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowercase : int = logging.get_logger(__name__) _lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _lowercase : str = { 'vocab_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json' ), }, } _lowercase : int = { 'yjernite/retribert-base-uncased': 5_12, } _lowercase : Any = { 'yjernite/retribert-base-uncased': {'do_lower_case': True}, } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : str = PRETRAINED_INIT_CONFIGURATION a__ : Optional[Any] = RetriBertTokenizer a__ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ): super().__init__( _lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , ) __UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars ): __UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = strip_accents __UpperCAmelCase = tokenize_chinese_chars __UpperCAmelCase = normalizer_class(**_lowercase ) __UpperCAmelCase = do_lower_case def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ): __UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase ) return tuple(_lowercase )
49
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ['PoolFormerFeatureExtractor'] _lowercase : Any = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
49
1
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
49
"""simple docstring""" def lowercase__ ( snake_case_ :Dict ): # noqa: E741 __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n __UpperCAmelCase = [False] * n def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ): if parent == root: out_edge_count += 1 __UpperCAmelCase = True __UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: __UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: __UpperCAmelCase = True # AP found via cycle if at == low[to]: __UpperCAmelCase = True else: __UpperCAmelCase = min(low[at] , snake_case_ ) return out_edge_count for i in range(snake_case_ ): if not visited[i]: __UpperCAmelCase = 0 __UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ ) __UpperCAmelCase = out_edge_count > 1 for x in range(len(snake_case_ ) ): if is_art[x] is True: print(snake_case_ ) # Adjacency list of graph _lowercase : Optional[Any] = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
49
1
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _UpperCAmelCase ( datasets.BeamBasedBuilder ): def a ( self : Optional[int] ): return datasets.DatasetInfo( features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_lowercase , ) def a ( self : Tuple , _lowercase : List[Any] , _lowercase : Union[str, Any] ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )] def a ( self : List[str] , _lowercase : str , _lowercase : Dict ): import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_lowercase ) class _UpperCAmelCase ( datasets.BeamBasedBuilder ): def a ( self : Optional[int] ): return datasets.DatasetInfo( features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_lowercase , ) def a ( self : int , _lowercase : Tuple , _lowercase : List[str] ): return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} ) ] def a ( self : List[str] , _lowercase : str , _lowercase : Any ): import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_lowercase ) def lowercase__ ( ): return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] def lowercase__ ( ): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] class _UpperCAmelCase ( _lowerCAmelCase ): @require_beam def a ( self : List[str] ): __UpperCAmelCase = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCAmelCase = DummyBeamDataset(cache_dir=_lowercase , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_lowercase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) __UpperCAmelCase = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _lowercase ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _lowercase ) self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def a ( self : Any ): import apache_beam as beam __UpperCAmelCase = beam.io.parquetio.WriteToParquet __UpperCAmelCase = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCAmelCase = DummyBeamDataset(cache_dir=_lowercase , beam_runner='''DirectRunner''' ) with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock: __UpperCAmelCase = partial(_lowercase , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _lowercase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) self.assertTrue( os.path.exists( os.path.join( _lowercase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) 
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) __UpperCAmelCase = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _lowercase ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _lowercase ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) ) self.assertTrue( os.path.exists(os.path.join(_lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def a ( self : List[str] ): with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCAmelCase = DummyBeamDataset(cache_dir=_lowercase ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def a ( self : Tuple ): __UpperCAmelCase = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCAmelCase = NestedBeamDataset(cache_dir=_lowercase , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_lowercase , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ) __UpperCAmelCase = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , _lowercase ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _lowercase ) self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset
49
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "EncodecFeatureExtractor" a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ): super().__init__(_lowercase , _lowercase ) __UpperCAmelCase = self.feature_extractor __UpperCAmelCase = False def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ): return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase ) def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''text''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase ) if audio is not None: __UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase ) if audio is None: return inputs elif text is None: return audio_inputs else: __UpperCAmelCase = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: __UpperCAmelCase = audio_inputs['''padding_mask'''] return inputs def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''audio''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase ) if len(_lowercase ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(_lowercase , padding_mask=_lowercase ) else: return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ): return self.tokenizer.decode(*_lowercase , **_lowercase ) def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ): __UpperCAmelCase = to_numpy(_lowercase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape if padding_mask is None: return list(_lowercase ) __UpperCAmelCase = to_numpy(_lowercase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __UpperCAmelCase = seq_len - padding_mask.shape[-1] __UpperCAmelCase = 1 - self.feature_extractor.padding_value __UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase ) __UpperCAmelCase = audio_values.tolist() for i in range(_lowercase ): __UpperCAmelCase = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 ) return audio_values
49
1
"""simple docstring""" import functools def lowercase__ ( snake_case_ :list[int] , snake_case_ :list[int] ): # Validation if not isinstance(snake_case_ , snake_case_ ) or not all(isinstance(snake_case_ , snake_case_ ) for day in days ): raise ValueError('''The parameter days should be a list of integers''' ) if len(snake_case_ ) != 3 or not all(isinstance(snake_case_ , snake_case_ ) for cost in costs ): raise ValueError('''The parameter costs should be a list of three integers''' ) if len(snake_case_ ) == 0: return 0 if min(snake_case_ ) <= 0: raise ValueError('''All days elements should be greater than 0''' ) if max(snake_case_ ) >= 366: raise ValueError('''All days elements should be less than 366''' ) __UpperCAmelCase = set(snake_case_ ) @functools.cache def dynamic_programming(snake_case_ :int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
49
"""simple docstring""" def lowercase__ ( snake_case_ :str , snake_case_ :str ): __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = len(snake_case_ ) __UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __UpperCAmelCase = True for i in range(snake_case_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __UpperCAmelCase = True if a[i].islower(): __UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
49
1
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] ): self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for a, b in zip(_lowercase , _lowercase ): self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase ) def a ( self : List[str] ): __UpperCAmelCase = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(_lowercase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def a ( self : Tuple ): __UpperCAmelCase = None ops.enable_eager_execution_internal() __UpperCAmelCase = tf.config.list_physical_devices('''CPU''' ) if len(_lowercase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __UpperCAmelCase = tf.config.list_logical_devices(device_type='''CPU''' ) __UpperCAmelCase = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __UpperCAmelCase = GradientAccumulator() __UpperCAmelCase = tf.Variable([4.0, 3.0] ) __UpperCAmelCase , __UpperCAmelCase = create_optimizer(5E-5 , 10 , 5 ) __UpperCAmelCase = tf.Variable([0.0, 0.0] , trainable=_lowercase ) def accumulate_on_replica(_lowercase : Optional[int] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(_lowercase : Dict , _lowercase : Tuple ): with strategy.scope(): __UpperCAmelCase = strategy.experimental_local_results(_lowercase ) local_variables[0].assign(_lowercase ) local_variables[1].assign(_lowercase ) strategy.run(_lowercase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(_lowercase ) def _check_local_values(_lowercase : Tuple , _lowercase : Optional[Any] ): __UpperCAmelCase = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
49
"""simple docstring""" from collections import deque class _UpperCAmelCase : def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ): __UpperCAmelCase = process_name # process name __UpperCAmelCase = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __UpperCAmelCase = arrival_time __UpperCAmelCase = burst_time # remaining burst time __UpperCAmelCase = 0 # total time of the process wait in ready queue __UpperCAmelCase = 0 # time from arrival time to completion time class _UpperCAmelCase : def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ): # total number of mlfq's queues __UpperCAmelCase = number_of_queues # time slice of queues that round robin algorithm applied __UpperCAmelCase = time_slices # unfinished process is in this ready_queue __UpperCAmelCase = queue # current time __UpperCAmelCase = current_time # finished process is in this sequence queue __UpperCAmelCase = deque() def a ( self : Dict ): __UpperCAmelCase = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a ( self : str , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a ( self : Any , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a ( self : Tuple , _lowercase : list[Process] ): __UpperCAmelCase = [] for i in range(len(_lowercase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a ( self : Optional[int] , _lowercase : deque[Process] ): return [q.burst_time for q in queue] def a ( self : str , _lowercase : Process ): process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a ( self : Union[str, Any] , _lowercase : deque[Process] ): __UpperCAmelCase = deque() # sequence deque of finished process while len(_lowercase ) != 0: __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(_lowercase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __UpperCAmelCase = 0 # set the process's turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # set the completion time __UpperCAmelCase = self.current_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ): __UpperCAmelCase = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(_lowercase ) ): __UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(_lowercase ) # if the burst time of process is bigger than 
time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __UpperCAmelCase = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(_lowercase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __UpperCAmelCase = 0 # set the finish time __UpperCAmelCase = self.current_time # update the process' turnaround time because it is finished __UpperCAmelCase = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(_lowercase ) self.finish_queue.extend(_lowercase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a ( self : Union[str, Any] ): # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __UpperCAmelCase , __UpperCAmelCase = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _lowercase : List[str] = Process('P1', 0, 53) _lowercase : str = Process('P2', 0, 17) _lowercase : Union[str, Any] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : Any = 3 _lowercase : Union[str, Any] = [17, 25] _lowercase : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) _lowercase : Optional[Any] = Process('P1', 0, 53) _lowercase : Tuple = Process('P2', 0, 17) _lowercase : Optional[int] = Process('P3', 0, 68) _lowercase : int = Process('P4', 0, 24) _lowercase : int = 3 _lowercase : int = [17, 25] _lowercase : List[str] = deque([Pa, Pa, Pa, Pa]) _lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0) _lowercase : str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( f"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( f"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
49
1
"""simple docstring""" from typing import Any def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ): _validation( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) # Creates data structures and fill initial step __UpperCAmelCase = {} __UpperCAmelCase = {} for state in states_space: __UpperCAmelCase = observations_space[0] __UpperCAmelCase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) __UpperCAmelCase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case_ ) ): __UpperCAmelCase = observations_space[o] __UpperCAmelCase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state # Update probabilities and pointers dicts __UpperCAmelCase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) __UpperCAmelCase = arg_max # The final observation __UpperCAmelCase = observations_space[len(snake_case_ ) - 1] # argmax for given final observation __UpperCAmelCase = '''''' __UpperCAmelCase = -1 for k_state in states_space: __UpperCAmelCase = probabilities[(k_state, final_observation)] if probability > max_probability: __UpperCAmelCase = probability __UpperCAmelCase = k_state __UpperCAmelCase = arg_max # Process pointers backwards __UpperCAmelCase = last_state __UpperCAmelCase = [] for o in range(len(snake_case_ ) - 1 , -1 , -1 ): result.append(snake_case_ ) __UpperCAmelCase = pointers[previous, observations_space[o]] result.reverse() return result def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_not_empty( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) _validate_lists(snake_case_ , snake_case_ ) _validate_dicts( snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any ): _validate_list(snake_case_ , '''observations_space''' ) _validate_list(snake_case_ , '''states_space''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list''' raise ValueError(snake_case_ ) else: for x in _object: if not isinstance(snake_case_ , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a list of strings''' raise ValueError(snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ): _validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ ) _validate_nested_dict(snake_case_ , '''transition_probabilities''' ) _validate_nested_dict(snake_case_ , '''emission_probabilities''' ) def lowercase__ ( snake_case_ :Any , snake_case_ :str ): _validate_dict(_object , snake_case_ , snake_case_ ) for x in 
_object.values(): _validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ): if not isinstance(_object , snake_case_ ): __UpperCAmelCase = F'''{var_name} must be a dict''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ): __UpperCAmelCase = F'''{var_name} all keys must be strings''' raise ValueError(snake_case_ ) if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ): __UpperCAmelCase = '''nested dictionary ''' if nested else '''''' __UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}''' raise ValueError(snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
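# A minimal usage sketch of viterbi() as defined above, using the classic
# healthy/fever toy HMM; all state names, observations, and probabilities
# below are illustrative only and not part of the original module.
if __name__ == "__main__":
    example_observations = ["normal", "cold", "dizzy"]
    example_states = ["Healthy", "Fever"]
    example_initial = {"Healthy": 0.6, "Fever": 0.4}
    example_transitions = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    example_emissions = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    path = viterbi(
        example_observations,
        example_states,
        example_initial,
        example_transitions,
        example_emissions,
    )
    print(path)  # most likely state path: ['Healthy', 'Healthy', 'Fever']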
49
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : List[Any] = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = "camembert" def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ): super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = hidden_act __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = position_embedding_type __UpperCAmelCase = use_cache __UpperCAmelCase = classifier_dropout class _UpperCAmelCase ( _lowerCAmelCase ): @property def a ( self : Tuple ): if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
49
1
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def lowercase__ ( snake_case_ :Sequence[float] , snake_case_ :int , snake_case_ :int ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] __UpperCAmelCase = (low + high) // 2 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = max_subarray(snake_case_ , snake_case_ , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = max_subarray(snake_case_ , mid + 1 , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = max_cross_sum(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def lowercase__ ( snake_case_ :Sequence[float] , snake_case_ :int , snake_case_ :int , snake_case_ :int ): __UpperCAmelCase , __UpperCAmelCase = float('''-inf''' ), -1 __UpperCAmelCase , __UpperCAmelCase = float('''-inf''' ), -1 __UpperCAmelCase = 0 for i in range(snake_case_ , low - 1 , -1 ): summ += arr[i] if summ > left_sum: __UpperCAmelCase = summ __UpperCAmelCase = i __UpperCAmelCase = 0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: __UpperCAmelCase = summ __UpperCAmelCase = i return max_left, max_right, (left_sum + right_sum) def lowercase__ ( snake_case_ :int ): __UpperCAmelCase = [randint(1 , snake_case_ ) for _ in range(snake_case_ )] __UpperCAmelCase = time.time() max_subarray(snake_case_ , 0 , input_size - 1 ) __UpperCAmelCase = time.time() return end - start def lowercase__ ( ): __UpperCAmelCase = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000] __UpperCAmelCase = [time_max_subarray(snake_case_ ) for input_size in input_sizes] print('''No of Inputs\t\tTime Taken''' ) for input_size, runtime in zip(snake_case_ , snake_case_ ): print(snake_case_ , '''\t\t''' , snake_case_ ) plt.plot(snake_case_ , snake_case_ ) plt.xlabel('''Number of Inputs''' ) plt.ylabel('''Time taken in seconds''' ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
49
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks if the entire collection has been sorted if len(snake_case_ ) <= 1 or n <= 1: return insert_next(snake_case_ , n - 1 ) rec_insertion_sort(snake_case_ , n - 1 ) def lowercase__ ( snake_case_ :list , snake_case_ :int ): # Checks order between adjacent elements if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase = ( collection[index], collection[index - 1], ) insert_next(snake_case_ , index + 1 ) if __name__ == "__main__": _lowercase : Any = input('Enter integers separated by spaces: ') _lowercase : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
49
1