repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py)
---|---|---|---|---|---|---|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/preprocess.py
|
# coding=utf-8
| 15 | 7 | 14 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/universal_datamodule/universal_datamodule.py
|
from pytorch_lightning import LightningDataModule
from typing import Optional
from torch.utils.data import DataLoader, DistributedSampler
from fengshen.models.megatron import mpu
def get_consume_samples(data_model: LightningDataModule) -> int:
if hasattr(data_model.trainer.lightning_module, 'consumed_samples'):
consumed_samples = data_model.trainer.lightning_module.consumed_samples
print('get consumed samples from model: {}'.format(consumed_samples))
else:
world_size = data_model.trainer.world_size
consumed_samples = max(0, data_model.trainer.global_step - 1) * \
data_model.hparams.train_batchsize * world_size * data_model.trainer.accumulate_grad_batches
print('calculate consumed samples: {}'.format(consumed_samples))
return consumed_samples
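# Worked example of the fallback branch above (hypothetical numbers): with
# global_step=101, train_batchsize=16, world_size=8 and accumulate_grad_batches=2,
# consumed_samples = max(0, 101 - 1) * 16 * 8 * 2 = 25,600.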
class UniversalDataModule(LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('Universal DataModule')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--dataloader_workers', default=2, type=int)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--val_batchsize', default=16, type=int)
parser.add_argument('--test_batchsize', default=16, type=int)
parser.add_argument('--datasets_name', type=str, default=None)
parser.add_argument('--train_datasets_field', type=str, default='train')
parser.add_argument('--val_datasets_field', type=str, default='validation')
parser.add_argument('--test_datasets_field', type=str, default='test')
parser.add_argument('--train_file', type=str, default=None)
parser.add_argument('--val_file', type=str, default=None)
parser.add_argument('--test_file', type=str, default=None)
parser.add_argument('--raw_file_type', type=str, default='json')
parser.add_argument('--sampler_type', type=str,
choices=['single',
'random'],
default='random')
parser.add_argument('--use_mpu', action="store_true", default=False,
help="Whether to use Megatron mpu to get the data-parallel rank and world size"
)
return parent_args
def __init__(
self,
tokenizer,
collate_fn,
args,
datasets=None,
**kwargs,
):
super().__init__()
# If no dataset name is passed in, the internal datasets can be replaced from outside the object with whatever the model needs
if datasets is not None:
self.datasets = datasets
elif args.datasets_name is not None:
from fengshen.data.fs_datasets import load_dataset
print('---------begin to load datasets {}'.format(args.datasets_name))
self.datasets = load_dataset(
args.datasets_name, num_proc=args.num_workers)
print('---------finished loading datasets {}'.format(args.datasets_name))
else:
print('---------begin to load datasets from local file')
from datasets import load_dataset
self.datasets = load_dataset(args.raw_file_type,
data_files={
args.train_datasets_field: args.train_file,
args.val_datasets_field: args.val_file,
args.test_datasets_field: args.test_file})
print('---------finished loading datasets from local file')
self.tokenizer = tokenizer
self.collate_fn = collate_fn
self.save_hyperparameters(args)
def get_custom_sampler(self, ds):
from .universal_sampler import PretrainingRandomSampler
from .universal_sampler import PretrainingSampler
world_size = self.trainer.world_size
consumed_samples = get_consume_samples(self)
# resolve the data-parallel rank and world size for the user-defined sampler (from mpu when --use_mpu is set)
data_parallel_rank = mpu.get_data_parallel_rank() if self.hparams.use_mpu else self.trainer.global_rank
data_parallel_size = mpu.get_data_parallel_world_size() if self.hparams.use_mpu else world_size
if self.hparams.sampler_type == 'random':
return PretrainingRandomSampler(
total_samples=len(ds),
# consumed_samples cal by global steps
consumed_samples=consumed_samples,
micro_batch_size=self.hparams.train_batchsize,
data_parallel_rank=data_parallel_rank,
data_parallel_size=data_parallel_size,
epoch=self.trainer.current_epoch,
)
elif self.hparams.sampler_type == 'single':
return PretrainingSampler(
total_samples=len(ds),
# consumed_samples cal by global steps
consumed_samples=consumed_samples,
micro_batch_size=self.hparams.train_batchsize,
data_parallel_rank=data_parallel_rank,
data_parallel_size=data_parallel_size,
)
else:
raise Exception('Unknown sampler type: {}'.format(self.hparams.sampler_type))
def setup(self, stage: Optional[str] = None) -> None:
return
def train_dataloader(self):
ds = self.datasets[self.hparams.train_datasets_field]
collate_fn = self.collate_fn
if hasattr(ds, 'collate_fn'):
collate_fn = ds.collate_fn
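# When Lightning's replace_sampler_ddp is disabled, sharding and resume are handled
# by the custom batch sampler built in get_custom_sampler above.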
if self.hparams.replace_sampler_ddp is False:
return DataLoader(
ds,
batch_sampler=self.get_custom_sampler(ds),
num_workers=self.hparams.dataloader_workers,
collate_fn=collate_fn,
pin_memory=True,
)
return DataLoader(
ds,
batch_size=self.hparams.train_batchsize,
num_workers=self.hparams.dataloader_workers,
collate_fn=collate_fn,
pin_memory=True,
)
def val_dataloader(self):
ds = self.datasets[self.hparams.val_datasets_field]
collate_fn = self.collate_fn
if hasattr(ds, 'collate_fn'):
collate_fn = ds.collate_fn
return DataLoader(
ds,
batch_size=self.hparams.val_batchsize,
shuffle=False,
num_workers=self.hparams.dataloader_workers,
collate_fn=collate_fn,
sampler=DistributedSampler(
ds, shuffle=False),
pin_memory=True,
)
# return DataLoader(
# ds, shuffle=False, batch_size=self.hparams.val_batchsize, pin_memory=False, collate_fn=collate_fn,
# )
def test_dataloader(self):
ds = self.datasets[self.hparams.test_datasets_field]
collate_fn = self.collate_fn
if collate_fn is None and hasattr(ds, 'collater'):
collate_fn = ds.collater
return DataLoader(
ds,
batch_size=self.hparams.test_batchsize,
shuffle=False,
num_workers=self.hparams.dataloader_workers,
collate_fn=collate_fn,
sampler=DistributedSampler(
ds, shuffle=False),
pin_memory=True,
)
def predict_dataloader(self):
ds = self.datasets[self.hparams.test_datasets_field]
collate_fn = self.collate_fn
if collate_fn is None and hasattr(ds, 'collater'):
collate_fn = ds.collater
return DataLoader(
ds,
batch_size=self.hparams.test_batchsize,
shuffle=False,
num_workers=self.hparams.dataloader_workers,
collate_fn=collate_fn,
sampler=DistributedSampler(
ds, shuffle=False),
pin_memory=True,
)
| 7,829 | 40.210526 | 112 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/universal_datamodule/universal_sampler.py
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloaders."""
import torch
class PretrainingSampler:
def __init__(self, total_samples, consumed_samples, micro_batch_size,
data_parallel_rank, data_parallel_size, drop_last=True):
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.drop_last = drop_last
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.consumed_samples < self.total_samples, \
'no samples left to consume: {}, {}'.format(self.consumed_samples,
self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples // self.micro_batch_times_data_parallel_size
def get_start_end_idx(self):
start_idx = self.data_parallel_rank * self.micro_batch_size
end_idx = start_idx + self.micro_batch_size
return start_idx, end_idx
def __iter__(self):
batch = []
# The last batch will be dropped unless drop_last is set to False
for idx in range(self.consumed_samples, self.total_samples):
batch.append(idx)
if len(batch) == self.micro_batch_times_data_parallel_size:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
batch = []
# Yield the last partial batch if drop_last is set to False
if len(batch) > 0 and not self.drop_last:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
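# Example of the per-rank slicing above (hypothetical numbers): with micro_batch_size=2
# and data_parallel_size=2, a full global batch [0, 1, 2, 3] yields [0, 1] on
# data_parallel_rank 0 and [2, 3] on rank 1.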
class PretrainingRandomSampler:
def __init__(self, total_samples, consumed_samples, micro_batch_size,
data_parallel_rank, data_parallel_size, epoch):
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_size = data_parallel_size
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.last_batch_size = \
self.total_samples % self.micro_batch_times_data_parallel_size
self.epoch = epoch
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples // self.micro_batch_times_data_parallel_size
def __iter__(self):
active_total_samples = self.total_samples - self.last_batch_size
current_epoch_samples = self.consumed_samples % active_total_samples
assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0
# data sharding and random sampling
bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) \
* self.micro_batch_size
bucket_offset = current_epoch_samples // self.data_parallel_size
start_idx = self.data_parallel_rank * bucket_size
g = torch.Generator()
g.manual_seed(self.epoch)
random_idx = torch.randperm(bucket_size, generator=g).tolist()
idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
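# Sharding sketch (hypothetical numbers): with total_samples=100, micro_batch_size=2 and
# data_parallel_size=2, bucket_size = (100 // 4) * 2 = 50, so rank 0 permutes indices
# 0..49 and rank 1 permutes indices 50..99, both seeded by the epoch; bucket_offset
# skips the entries already consumed in the current epoch.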
batch = []
# Last batch if not complete will be dropped.
for idx in idx_range:
batch.append(idx)
if len(batch) == self.micro_batch_size:
self.consumed_samples += self.micro_batch_times_data_parallel_size
yield batch
batch = []
def set_epoch(self, epoch):
self.epoch = epoch
| 5,181 | 40.126984 | 89 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/universal_datamodule/__init__.py
|
from .universal_datamodule import UniversalDataModule
from .universal_sampler import PretrainingSampler, PretrainingRandomSampler
__all__ = ['UniversalDataModule', 'PretrainingSampler', 'PretrainingRandomSampler']
| 215 | 42.2 | 83 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/mmap_dataloader/mmap_index_dataset.py
|
import numpy as np
import torch
from typing import List
from torch.utils.data import Dataset
class MMapIndexDataset(Dataset):
# datapaths is the list of paths to all memory-mapped files
# input_tensor_name is the list of input tensor names, e.g. ['input_ids']; each tensor is stored in its corresponding file
def __init__(self, datapaths: List[str], input_tensor_name: List[str]):
dict_idx_fp = {}
dict_bin_fp = {}
idx_len = []
for tensor_name in input_tensor_name:
idx_fp = []
bin_fp = []
total_len = 0
for data_path in datapaths:
idx_fp += [np.load(
data_path + '_' + tensor_name + '.npy', mmap_mode='r')]
bin_fp += [np.memmap(
data_path + '_' + tensor_name + '.bin',
dtype='long',
mode='r')]
total_len += idx_fp[-1].shape[0]
idx_len += [idx_fp[-1].shape[0]]
dict_idx_fp[tensor_name] = idx_fp
dict_bin_fp[tensor_name] = bin_fp
# in the usual case every tensor has the same length
self._len = total_len
self._input_tensor_name = input_tensor_name
self._dict_idx_fp = dict_idx_fp
self._dict_bin_fp = dict_bin_fp
self._idx_len = idx_len
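# Assumed on-disk layout (inferred from the indexing in __getitem__ below): for each
# tensor_name, '<data_path>_<tensor_name>.npy' holds an (N, 2) array of [start, end)
# offsets into the flat token stream stored in '<data_path>_<tensor_name>.bin'.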
def __len__(self):
return self._len
def __getitem__(self, idx):
sample = {}
for i in range(len(self._idx_len)):
if idx >= self._idx_len[i]:
idx -= self._idx_len[i]
else:
break
for tensor_name in self._input_tensor_name:
sample[tensor_name] = torch.tensor(self._dict_bin_fp[tensor_name][i][
self._dict_idx_fp[tensor_name][i][idx, 0]:
self._dict_idx_fp[tensor_name][i][idx, 1]
], dtype=torch.long)
# print(sample)
return sample
| 1,815 | 32.62963 | 81 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/mmap_dataloader/mmap_datamodule.py
|
from typing import Optional
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from fengshen.data.mmap_dataloader.mmap_index_dataset import MMapIndexDataset
class MMapDataModule(LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('MMAP DataModule')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_batchsize', default=32, type=int)
parser.add_argument('--eval_batchsize', default=32, type=int)
parser.add_argument('--test_batchsize', default=32, type=int)
parser.add_argument('--train_datas', default=[
'./train_datas'
], type=str, nargs='+')
parser.add_argument('--valid_datas', default=[
'./valid_datas'
], type=str, nargs='+')
parser.add_argument('--test_datas', default=[
'./test_datas'],
type=str, nargs='+')
parser.add_argument('--input_tensor_name', default=['input_ids'], type=str, nargs='+')
return parent_args
def __init__(
self,
collate_fn,
args,
**kwargs,
):
super().__init__()
self.collate_fn = collate_fn
self.train_dataset = MMapIndexDataset(args.train_datas, args.input_tensor_name)
self.valid_dataset = MMapIndexDataset(args.valid_datas, args.input_tensor_name)
self.test_dataset = MMapIndexDataset(args.test_datas, args.input_tensor_name)
self.save_hyperparameters(args)
def setup(self, stage: Optional[str] = None) -> None:
return super().setup(stage)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.hparams.train_batchsize,
shuffle=True,
num_workers=self.hparams.num_workers,
collate_fn=self.collate_fn,
)
def val_dataloader(self):
return DataLoader(
self.valid_dataset,
batch_size=self.hparams.eval_batchsize,
shuffle=True,
num_workers=self.hparams.num_workers,
collate_fn=self.collate_fn,
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.hparams.test_batchsize,
shuffle=True,
num_workers=self.hparams.num_workers,
collate_fn=self.collate_fn,
)
| 2,461 | 34.681159 | 94 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/dreambooth_datasets/dreambooth_datasets.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : dreambooth_datasets.py
@Time : 2022/11/10 00:20
@Author : Gan Ruyi
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from pathlib import Path
def add_data_args(parent_args):
parser = parent_args.add_argument_group('taiyi stable diffusion data args')
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
required=True,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--class_data_dir",
type=str,
default=None,
required=False,
help="A folder containing the training data of class images.",
)
parser.add_argument(
"--instance_prompt",
type=str,
default=None,
help="The prompt with identifier specifying the instance",
)
parser.add_argument(
"--class_prompt",
type=str,
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
parser.add_argument(
"--with_prior_preservation",
default=False,
action="store_true",
help="Flag to add prior preservation loss.",
)
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
parser.add_argument(
"--num_class_images",
type=int,
default=100,
help=(
"Minimal class images for prior preservation loss. If not have enough images, additional images will be"
" sampled with class_prompt."
),
)
parser.add_argument(
"--resolution", type=int, default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", default=False,
help="Whether to center crop images before resizing to resolution"
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
return parent_args
class DreamBoothDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
It pre-processes the images and tokenizes the prompts.
"""
def __init__(
self,
instance_data_dir,
instance_prompt,
tokenizer,
class_data_dir=None,
class_prompt=None,
size=512,
center_crop=False,
):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.instance_data_dir = Path(instance_data_dir)
if not self.instance_data_dir.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_dir).iterdir())
print(self.instance_images_path)
self.num_instance_images = len(self.instance_images_path)
self.instance_prompt = instance_prompt
self._length = self.num_instance_images
if class_data_dir is not None:
self.class_data_dir = Path(class_data_dir)
self.class_data_dir.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_dir.iterdir())
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.class_prompt = class_prompt
else:
self.class_data_dir = None
self.image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
example["instance_images"] = self.image_transforms(instance_image)
example["instance_prompt_ids"] = self.tokenizer(
self.instance_prompt,
padding="do_not_pad",
truncation=True,
max_length=64,
# max_length=self.tokenizer.model_max_length,
).input_ids
if self.class_data_dir:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
example["class_prompt_ids"] = self.tokenizer(
self.class_prompt,
padding="do_not_pad",
truncation=True,
# max_length=self.tokenizer.model_max_length,
max_length=64,
).input_ids
return example
class PromptDataset(Dataset):
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
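# Minimal usage sketch for prior preservation (hypothetical, not part of this file):
# PromptDataset just repeats class_prompt so a diffusion pipeline can pre-generate the
# --num_class_images class images later consumed by DreamBoothDataset, e.g.
# loader = DataLoader(PromptDataset(args.class_prompt, args.num_class_images),
#                     batch_size=args.sample_batch_size)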
| 6,386 | 33.711957 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/t5_dataloader/t5_datasets.py
|
# coding=utf8
import json
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, MT5Config, MT5Tokenizer, BatchEncoding
import torch
import pytorch_lightning as pl
import numpy as np
from itertools import chain
import sys
sys.path.append('../../')
def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
"""This function is copy of `random_spans_helper <https://github.com/google-research/
text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ .
Training parameters to avoid padding with random_spans_noise_mask.
When training a model with random_spans_noise_mask, we would like to set the other
training hyperparameters in a way that avoids padding.
This function helps us compute these hyperparameters.
We assume that each noise span in the input is replaced by extra_tokens_per_span_inputs sentinel tokens,
and each non-noise span in the targets is replaced by extra_tokens_per_span_targets sentinel tokens.
This function tells us the required number of tokens in the raw example (for split_tokens())
as well as the length of the encoded targets. Note that this function assumes
the inputs and targets will have EOS appended and includes that in the reported length.
Args:
inputs_length: an integer - desired length of the tokenized inputs sequence
noise_density: a float
mean_noise_span_length: a float
Returns:
tokens_length: length of original text in tokens
targets_length: an integer - length in tokens of encoded targets sequence
"""
def _tokens_length_to_inputs_length_targets_length(tokens_length):
num_noise_tokens = int(round(tokens_length * noise_density))
num_nonnoise_tokens = tokens_length - num_noise_tokens
num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
# inputs contain all nonnoise tokens, sentinels for all noise spans
# and one EOS token.
_input_length = num_nonnoise_tokens + num_noise_spans + 1
_output_length = num_noise_tokens + num_noise_spans + 1
return _input_length, _output_length
tokens_length = inputs_length
while _tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0] <= inputs_length:
tokens_length += 1
inputs_length, targets_length = _tokens_length_to_inputs_length_targets_length(
tokens_length)
# minor hack to get the targets length to be equal to inputs length
# which is more likely to have been set to a nice round number.
if noise_density == 0.5 and targets_length > inputs_length:
tokens_length -= 1
targets_length -= 1
return tokens_length, targets_length
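# Worked example: inputs_length=512, noise_density=0.15 and mean_noise_span_length=3
# give tokens_length=568 and targets_length=114 (85 noise tokens in 28 spans plus
# 483 non-noise tokens, one sentinel per span and an EOS on each side).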
class UnsuperviseT5Dataset(Dataset):
'''
Dataset used for T5 unsupervised pretraining.
load_data_type = 0: load raw data from data path and save tokenized data, call function load_data
load_data_type = 1: load tokenized data from path, call function load_tokenized_data
load_data_type = 2: load tokenized data from in-memory data, call function load_tokenized_memory_data
'''
def __init__(self, data_path, args, load_data_type=0, data=None):
super().__init__()
if args.tokenizer_type == 't5_tokenizer':
if args.new_vocab_path is not None:
self.tokenizer = MT5Tokenizer.from_pretrained(args.new_vocab_path)
else:
self.tokenizer = MT5Tokenizer.from_pretrained(args.pretrained_model_path)
else:
self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path)
self.noise_density = 0.15
self.mean_noise_span_length = 3
self.text_column_name = args.text_column_name
self.dataset_num_workers = args.dataset_num_workers
self.max_seq_length = args.max_seq_length
self.remove_columns = args.remove_columns
# whether to load pre-tokenized data
self.load_data_type = load_data_type
if self.load_data_type == 0:
# T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
# To ensure that the input length is `max_seq_length`, we need to increase the maximum length
# according to `mlm_probability` and `mean_noise_span_length`.
# We can also define the label length accordingly.
self.expanded_inputs_length, self.targets_length = compute_input_and_target_lengths(
inputs_length=self.max_seq_length,
noise_density=self.noise_density,
mean_noise_span_length=self.mean_noise_span_length,
)
print('self.expanded_inputs_length, self.targets_length:{},{}'.format(
self.expanded_inputs_length, self.targets_length))
self.data = self.load_data(data_path)
elif self.load_data_type == 1:
self.data = self.load_tokenized_data(data_path)
else:
assert data is not None
self.data = self.load_tokenized_memory_data(data)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path):
# TODO: large data process
from data.fs_datasets import load_dataset
samples = load_dataset(
# samples = datasets.load_from_disk(data_path)['train']
data_path, num_proc=self.dataset_num_workers)['train']
# print(samples)
tokenized_datasets = samples.map(
self.tokenize_function,
batched=True,
num_proc=self.dataset_num_workers,
# load_from_cache_file=not data_args.overwrite_cache,
).map(
batched=True,
num_proc=self.dataset_num_workers,
remove_columns=self.remove_columns)
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
self.group_texts,
batched=True,
num_proc=self.dataset_num_workers,
# load_from_cache_file=not data_args.overwrite_cache,
)
return tokenized_datasets
'''
Loads tokenized data previously saved by the load_data function.
'''
def load_tokenized_data(self, data_path):
from data.fs_datasets import load_dataset
samples = load_dataset(data_path)['train']
return samples
def load_tokenized_memory_data(self, data):
return data
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# Since we make sure that all sequences are of the same length, no attention_mask is needed.
def tokenize_function(self, examples):
# add_special_tokens=False here, to avoid inserting an eos token in the middle of a sentence
return self.tokenizer(examples[self.text_column_name],
add_special_tokens=False,
return_attention_mask=False)
# Main data processing function that will concatenate all texts from our dataset
# and generate chunks of expanded_inputs_length.
def group_texts(self, examples):
# Concatenate all texts.
concatenated_examples = {
k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= self.expanded_inputs_length:
total_length = (
total_length // self.expanded_inputs_length) * self.expanded_inputs_length
# Split by chunks of max_len.
result = {
k: [t[i: i + self.expanded_inputs_length]
for i in range(0, total_length, self.expanded_inputs_length)]
for k, t in concatenated_examples.items()
}
return result
class UnsuperviseT5DataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('UnsuperviseT5DataModel')
parser.add_argument('--dataset_num_workers', default=8, type=int)
parser.add_argument('--dataloader_num_workers', default=4, type=int)
parser.add_argument(
'--train_data_path', default='wudao_180g_mt5_tokenized', type=str)
parser.add_argument('--train_batchsize', default=2, type=int)
parser.add_argument('--valid_batchsize', default=2, type=int)
parser.add_argument('--train_split_size', default=None, type=float)
parser.add_argument('--tokenizer_type', default='t5_tokenizer', choices=['t5_tokenizer', 'bert_tokenizer'])
parser.add_argument('--text_column_name', default='text')
parser.add_argument('--remove_columns', nargs='+', default=[])
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
if args.train_split_size is not None:
from data.fs_datasets import load_dataset
data_splits = load_dataset(args.train_data_path, num_proc=args.dataset_num_workers)
train_split = data_splits['train']
test_split = data_splits['test']
print('train:', train_split, '\ntest_data:', test_split)
self.train_dataset = UnsuperviseT5Dataset('', args, load_data_type=2, data=train_split)
self.test_dataset = UnsuperviseT5Dataset('', args, load_data_type=2, data=test_split)
else:
self.train_data = UnsuperviseT5Dataset(args.train_data_path, args, load_data_type=1)
self.config = MT5Config.from_pretrained(args.pretrained_model_path)
self.noise_density = 0.15
self.mean_noise_span_length = 3
self.pad_token_id = self.config.pad_token_id
self.decoder_start_token_id = self.config.decoder_start_token_id
self.eos_token_id = self.config.eos_token_id
self.vocab_size = self.config.vocab_size
self.max_seq_length = args.max_seq_length
# The old sentencepiece model already contains the extra_ids, but T5Tokenizer adds another 100 extra_ids on top of it, so extra_ids=0 must be specified
if args.tokenizer_type == 't5_tokenizer' and args.new_vocab_path is not None:
self.tokenizer = MT5Tokenizer.from_pretrained(args.new_vocab_path, extra_ids=0)
# When starting from the original mt5, vocab_size needs to be updated to the new vocab size obtained after extracting the Chinese and English tokens
self.vocab_size = len(self.tokenizer)
# T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
# To ensure that the input length is `max_seq_length`, we need to increase the maximum length
# according to `mlm_probability` and `mean_noise_span_length`. We can also define the label length accordingly.
self.expanded_inputs_length, self.targets_length = compute_input_and_target_lengths(
inputs_length=self.max_seq_length,
noise_density=self.noise_density,
mean_noise_span_length=self.mean_noise_span_length,
)
def train_dataloader(self):
from fengshen.data.universal_datamodule.universal_sampler import PretrainingSampler
from fengshen.data.universal_datamodule.universal_datamodule import get_consume_samples
# use a custom sampler so that resumed training picks up the data at the correct position
consumed_samples = get_consume_samples(self)
batch_sampler = PretrainingSampler(
total_samples=len(self.train_dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.hparams.train_batchsize,
data_parallel_rank=self.trainer.global_rank,
data_parallel_size=self.trainer.world_size,
)
return DataLoader(
self.train_dataset,
batch_sampler=batch_sampler,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers,
collate_fn=self.collate_fn,
)
def val_dataloader(self):
sampler = torch.utils.data.distributed.DistributedSampler(
self.test_dataset, shuffle=False)
return DataLoader(
self.test_dataset,
sampler=sampler,
shuffle=False,
batch_size=self.hparams.valid_batchsize,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers,
collate_fn=self.collate_fn,
)
def predict_dataloader(self):
sampler = torch.utils.data.distributed.DistributedSampler(
self.test_dataset, shuffle=False)
return DataLoader(
self.test_dataset,
sampler=sampler,
shuffle=False,
batch_size=self.hparams.valid_batchsize,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers,
collate_fn=self.collate_fn,
)
def collate_fn(self, examples):
# convert list to dict and tensorize input
batch = BatchEncoding(
{k: np.array([examples[i][k] for i in range(len(examples))])
for k, v in examples[0].items()}
)
input_ids = np.array(batch['input_ids'])
batch_size, expanded_input_length = input_ids.shape
mask_indices = np.asarray([self.random_spans_noise_mask(
expanded_input_length) for i in range(batch_size)])
labels_mask = ~mask_indices
input_ids_sentinel = self.create_sentinel_ids(
mask_indices.astype(np.int8))
labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8))
batch["input_ids"] = self.filter_input_ids(
input_ids, input_ids_sentinel)
batch["labels"] = self.filter_input_ids(input_ids, labels_sentinel)
if batch["input_ids"].shape[-1] != self.max_seq_length:
raise ValueError(
f"`input_ids` are incorrectly preprocessed. `input_ids` length is \
{batch['input_ids'].shape[-1]}, but should be {self.max_seq_length}."
)
if batch["labels"].shape[-1] != self.targets_length:
raise ValueError(
f"`labels` are incorrectly preprocessed. `labels` length is \
{batch['labels'].shape[-1]}, but should be {self.targets_length}."
)
batch["decoder_input_ids"] = self.shift_tokens_right(
batch["labels"], self.pad_token_id, self.decoder_start_token_id
)
for k, v in batch.items():
batch[k] = torch.tensor(v)
# print(k, batch[k], self.tokenizer.batch_decode(batch[k]), '\n', flush=True)
return batch
def create_sentinel_ids(self, mask_indices):
"""
Sentinel ids creation given the indices that should be masked.
The start indices of each mask are replaced by the sentinel ids in increasing
order. Consecutive mask indices to be deleted are replaced with `-1`.
"""
start_indices = mask_indices - \
np.roll(mask_indices, 1, axis=-1) * mask_indices
start_indices[:, 0] = mask_indices[:, 0]
sentinel_ids = np.where(start_indices != 0, np.cumsum(
start_indices, axis=-1), start_indices)
sentinel_ids = np.where(
sentinel_ids != 0, (self.vocab_size - sentinel_ids), 0)
sentinel_ids -= mask_indices - start_indices
return sentinel_ids
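# Example: for a row with mask_indices [0, 1, 1, 0, 1] the code above yields
# start_indices [0, 1, 0, 0, 1]; the first token of each noise span receives a sentinel
# id (vocab_size - 1, vocab_size - 2, ...) and span continuations become -1 so that
# filter_input_ids below can drop them.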
def filter_input_ids(self, input_ids, sentinel_ids):
"""
Puts sentinel mask on `input_ids` and fuse consecutive mask tokens into a single mask token by deleting.
This will reduce the sequence length from `expanded_inputs_length` to `input_length`.
"""
batch_size = input_ids.shape[0]
input_ids_full = np.where(sentinel_ids != 0, sentinel_ids, input_ids)
# input_ids tokens and sentinel tokens are >= 0, tokens < 0 are
# masked tokens coming after sentinel tokens and should be removed
input_ids = input_ids_full[input_ids_full >=
0].reshape((batch_size, -1))
input_ids = np.concatenate(
[input_ids, np.full((batch_size, 1), self.eos_token_id, dtype=np.int32)], axis=-1
)
return input_ids
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(self, input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(
shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
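# Example: labels [[13, 7, 1]] with decoder_start_token_id=0 become decoder_input_ids
# [[0, 13, 7]]; any -100 placeholders are replaced by pad_token_id.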
def random_spans_noise_mask(self, length):
"""This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/
blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2682>`__ .
Noise mask consisting of random spans of noise tokens.
The number of noise tokens and the number of noise spans and non-noise spans
are determined deterministically as follows:
num_noise_tokens = round(length * noise_density)
num_nonnoise_spans = num_noise_spans = round(num_noise_tokens / mean_noise_span_length)
Spans alternate between non-noise and noise, beginning with non-noise.
Subject to the above restrictions, all masks are equally likely.
Args:
length: an int32 scalar (length of the incoming token sequence)
noise_density: a float - approximate density of output mask
mean_noise_span_length: a number
Returns:
a boolean tensor with shape [length]
"""
orig_length = length
num_noise_tokens = int(np.round(length * self.noise_density))
# avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
num_noise_spans = int(
np.round(num_noise_tokens / self.mean_noise_span_length))
# avoid degeneracy by ensuring positive number of noise spans
num_noise_spans = max(num_noise_spans, 1)
num_nonnoise_tokens = length - num_noise_tokens
# pick the lengths of the noise spans and the non-noise spans
def _random_segmentation(num_items, num_segments):
"""Partition a sequence of items randomly into non-empty segments.
Args:
num_items: an integer scalar > 0
num_segments: an integer scalar in [1, num_items]
Returns:
a Tensor with shape [num_segments] containing positive integers that add
up to num_items
"""
mask_indices = np.arange(num_items - 1) < (num_segments - 1)
np.random.shuffle(mask_indices)
first_in_segment = np.pad(mask_indices, [[1, 0]])
segment_id = np.cumsum(first_in_segment)
# count length of sub segments assuming that list is sorted
_, segment_length = np.unique(segment_id, return_counts=True)
return segment_length
noise_span_lengths = _random_segmentation(
num_noise_tokens, num_noise_spans)
nonnoise_span_lengths = _random_segmentation(
num_nonnoise_tokens, num_noise_spans)
interleaved_span_lengths = np.reshape(
np.stack([nonnoise_span_lengths, noise_span_lengths],
axis=1), [num_noise_spans * 2]
)
span_starts = np.cumsum(interleaved_span_lengths)[:-1]
span_start_indicator = np.zeros((length,), dtype=np.int8)
span_start_indicator[span_starts] = True
span_num = np.cumsum(span_start_indicator)
is_noise = np.equal(span_num % 2, 1)
return is_noise[:orig_length]
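# Example (hypothetical numbers): length=20 with noise_density=0.15 and
# mean_noise_span_length=3 gives 3 noise tokens in a single noise span and 17 non-noise
# tokens; spans alternate non-noise/noise, beginning with non-noise.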
class TaskT5Dataset(Dataset):
def __init__(self, data_path, args):
super().__init__()
self.max_length = args.max_seq_length
if args.tokenizer_type == 't5_tokenizer':
self.tokenizer = MT5Tokenizer.from_pretrained(args.pretrained_model_path)
else:
self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path)
self.data = self.load_data(data_path)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.encode(self.data[index])
def load_data(self, data_path):
samples = []
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
for line in tqdm(lines):
samples.append(json.loads(line))
return samples
def encode(self, item):
if item["textb"] != "":
text = item['question'] + ','.join(item['choice'])+'。' + f"""{item["texta"]}""" + f"""{item["textb"]}"""
else:
text = f"""{item["question"]}""" + ",".join(item["choice"]) + "。" + f"""{item["texta"]}"""
label = item['answer']
encode_dict = self.tokenizer.encode_plus(text, max_length=self.max_length, padding='max_length',
truncation=True, return_tensors='pt')
decode_dict = self.tokenizer.encode_plus(label, max_length=16, padding='max_length',
truncation=True)
answer_token = []
max_label_len = 0
choice_encode = [] # used to determine the model's maximum generation length
for a in item['choice']:
answer_encode = self.tokenizer.encode(a)
choice_encode.append(answer_encode)
if len(answer_encode) > max_label_len:
max_label_len = len(answer_encode)
for an in answer_encode:
if an not in answer_token:
answer_token.append(an)
# bad_words_ids = [[i] for i in range(self.tokenizer.vocab_size) if i not in answer_token] # do not generate these tokens
# while len(bad_words_ids)<self.tokenizer.vocab_size:
# bad_words_ids.append(bad_words_ids[0])
# bad_words_ids = [[423],[67],[878]]
encode_sent = encode_dict['input_ids'].squeeze()
attention_mask = encode_dict['attention_mask'].squeeze()
target = decode_dict['input_ids']
labels = torch.tensor(target)
# mask padding positions so they are ignored by the loss
labels[labels == self.tokenizer.pad_token_id] = -100
return {
"input_ids": encode_sent.long(),
"attention_mask": attention_mask.float(),
"labels": labels.long(),
"force_words_ids": answer_token,
}
class TaskT5DataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TaskT5DataModel')
parser.add_argument('--dataset_num_workers', default=8, type=int)
parser.add_argument('--dataloader_num_workers', default=4, type=int)
parser.add_argument(
'--train_data_path', default='wudao_180g_mt5_tokenized', type=str)
parser.add_argument(
'--valid_data_path', default='wudao_180g_mt5_tokenized', type=str)
parser.add_argument('--train_batchsize', default=2, type=int)
parser.add_argument('--valid_batchsize', default=2, type=int)
parser.add_argument('--train_split_size', default=None, type=float)
parser.add_argument('--tokenizer_type', default='t5_tokenizer', choices=['t5_tokenizer', 'bert_tokenizer'])
parser.add_argument('--text_column_name', default='text')
parser.add_argument('--remove_columns', nargs='+', default=[])
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.train_dataset = TaskT5Dataset(args.train_data_path, args)
self.valid_dataset = TaskT5Dataset(args.valid_data_path, args)
def train_dataloader(self):
from fengshen.data.universal_datamodule.universal_sampler import PretrainingSampler
from fengshen.data.universal_datamodule.universal_datamodule import get_consume_samples
# use a custom sampler so that resumed training picks up the data at the correct position
consumed_samples = get_consume_samples(self)
# batch_sampler = PretrainingRandomSampler(
batch_sampler = PretrainingSampler(
total_samples=len(self.train_dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.hparams.train_batchsize,
data_parallel_rank=self.trainer.global_rank,
data_parallel_size=self.trainer.world_size,
)
# epoch=self.trainer.current_epoch
# )
return DataLoader(
self.train_dataset,
batch_sampler=batch_sampler,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers
)
def val_dataloader(self):
sampler = torch.utils.data.distributed.DistributedSampler(
self.valid_dataset, shuffle=False)
return DataLoader(
self.valid_dataset,
sampler=sampler,
shuffle=False,
batch_size=self.hparams.valid_batchsize,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers
)
| 25,946 | 45.087034 | 127 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/t5_dataloader/t5_gen_datasets.py
|
# -*- encoding: utf-8 -*-
'''
@File : t5_gen_datasets.py
@Time : 2022/10/24 19:29
@Author : He Junqing
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
from transformers import (
BertTokenizer,
MT5Config,
MT5Tokenizer,
MT5ForConditionalGeneration,
)
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import pytorch_lightning as pl
import numpy as np
import sys
sys.path.append("../../")
special_token_dict = {
"additional_special_tokens": [
"[CTSTART]",
"[CTEND]",
"[SEP]",
"[KNSTART]",
"[KNEND]",
]
}
class DialogDataset(Dataset):
def __init__(self, data_path, args, data, load_data_type=1) -> None:
super().__init__()
if args.tokenizer_type == "t5_tokenizer":
self.tokenizer = MT5Tokenizer.from_pretrained(
args.pretrained_model_path)
if len(self.tokenizer) == 32596:
self.tokenizer.add_special_tokens(special_token_dict)
print(
"add special tokens to tokenizer,vocab size:",
len(self.tokenizer)
)
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
self.model.resize_token_embeddings(len(self.tokenizer))
self.model.save_pretrained(args.new_vocab_path)
self.tokenizer.save_pretrained(
args.new_vocab_path)
else:
self.tokenizer = BertTokenizer.from_pretrained(
args.pretrained_model_path)
self.load_data_type = load_data_type
self.data_split = data
self.num_workers = args.preprocessing_num_workers
self.max_seq_length = args.max_seq_length
self.max_knowledge_length = args.max_knowledge_length
self.max_target_length = args.max_target_length
# tokenizer config
self.config = MT5Config.from_pretrained(args.pretrained_model_path)
self.decoder_start_token_id = self.config.decoder_start_token_id
self.eos_token_id = self.config.eos_token_id
self.vocab_size = self.config.vocab_size
# print(self.tokenizer.decode([2]))
# load from raw data or hf dataset
if self.load_data_type == 0:
self.data = self.load_data(data_path)
elif self.load_data_type == 1:
self.data = self.load_packed_data(data_path)
else: # for testing
self.data = data_path
def load_packed_data(self, data_path):
from fengshen.data.fs_datasets import load_dataset
samples = load_dataset(data_path,
num_proc=self.num_workers)[self.data_split]
tokenized_samples = samples.map(
self.regular_tokenize, batched=False,
num_proc=self.num_workers
)
return tokenized_samples
def load_data(self, data_path):
"""
load data from raw data
return untokenized data
"""
from datasets import load_dataset
ds = load_dataset("json", data_files=data_path)['train']
samples = ds.map(self.regular_tokenize, batched=False, num_proc=self.num_workers
)
return samples
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def regular_tokenize(self, sample):
# print(len(sample['context']))
context_ids = self.tokenizer(
sample["context"],
add_special_tokens=True,
return_attention_mask=False,
return_token_type_ids=True,
)
context_types = self.get_token_type(
sample["context"], context_ids["token_type_ids"]
)
# print('context',sample['context'])
# print('context_ids',context_ids['input_ids'])
knowledge_ids = self.tokenizer.encode(
sample["knowledge"], add_special_tokens=False
)
# print('knowledge_ids',knowledge_ids)
if isinstance(knowledge_ids, int):
knowledge_ids = [knowledge_ids]
target_ids = self.tokenizer.encode(
sample["target"],
add_special_tokens=False,
max_length=self.max_target_length - 1,
truncation=True,
)
# print('target',sample['target'])
# print('target_ids',target_ids)
# print('decode target',self.tokenizer.decode(target_ids))
# truncate
knowledge_ids = (
[self.tokenizer.convert_tokens_to_ids("[KNSTART]")]
+ knowledge_ids[: self.max_knowledge_length - 2]
+ [self.tokenizer.convert_tokens_to_ids("[KNEND]")]
)
l_kn = len(knowledge_ids)
knowledge_types = [2] * l_kn
flatten_context = []
for line in context_ids["input_ids"]:
flatten_context.extend(line)
l_ct = min(len(flatten_context), self.max_seq_length - l_kn - 2)
context_ids = (
[self.tokenizer.convert_tokens_to_ids("[CTSTART]")]
+ flatten_context[-l_ct:]
+ [self.tokenizer.convert_tokens_to_ids("[CTEND]")]
)
context_types = context_types[-l_ct:] + [0]
context_types.insert(0, context_types[0])
assert len(context_ids) == len(
context_types
), "len of context ids and token types unmatch, context:{},ids:{} types:{},len {}:{}".format(
sample["context"],
context_ids,
context_types,
len(context_ids),
len(context_types),
)
try:
target_ids = target_ids + [self.eos_token_id]
except Exception:
print(sample["target"], target_ids, self.eos_token_id)
tokenized = {}
tokenized["input_ids"] = np.array(context_ids + knowledge_ids, dtype=np.int32)
tokenized["token_types"] = np.array(
context_types + knowledge_types, dtype=np.int32
)
tokenized["attention_mask"] = np.ones(
len(context_types + knowledge_types), dtype=np.int8
)
tokenized["labels"] = np.array(target_ids, dtype=np.int32)
return tokenized
def get_token_type(self, context, tokentypes=None):
# token_type_ids from the tokenizer are all zero, so build the token types manually here
context_token_types = []
for i, line in enumerate(context):
if tokentypes:
if i % 2 == 0:
token_type = [0] * len(tokentypes[i])
else:
token_type = [1] * len(tokentypes[i])
else:
if i % 2 == 0:
token_type = [0] * (1 + len(line))
else:
token_type = [1] * (1 + len(line))
context_token_types.extend(token_type)
return context_token_types
class DialogDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group("SuperviseT5DataModel")
parser.add_argument("--dataset_num_workers", default=8, type=int)
parser.add_argument("--dataloader_num_workers", default=4, type=int)
parser.add_argument("--train_data_path", default="dialog_4g_test", type=str)
parser.add_argument(
"--valid_data_path", default="wudao_180g_mt5_tokenized", type=str
)
parser.add_argument("--train_batchsize", default=2, type=int)
parser.add_argument("--valid_batchsize", default=2, type=int)
parser.add_argument("--max_seq_length", default=512, type=int)
parser.add_argument("--max_knowledge_length", default=128, type=int)
parser.add_argument("--max_target_length", default=128, type=int)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.load_data(args)
self.epochs = args.max_epochs
def load_data(self, args):
if args.train_split_size is not None:
from fengshen.data.fs_datasets import load_dataset
data_splits = load_dataset(
args.train_data_path, num_proc=args.dataset_num_workers
)
train_split = data_splits['train']
test_split = data_splits['test']
print('train:', train_split, '\ntest_data:', test_split)
self.train_dataset = DialogDataset(
args.train_data_path, args, load_data_type=1, data="train"
)
self.test_dataset = DialogDataset(
args.train_data_path, args, load_data_type=1, data="test"
)
else:
self.train_data = DialogDataset(
args.train_data_path, args, load_data_type=1
)
self.config = MT5Config.from_pretrained(args.pretrained_model_path)
self.pad_token_id = self.config.pad_token_id
self.decoder_start_token_id = self.config.decoder_start_token_id
print("bos id:", self.decoder_start_token_id)
def collate_fn(self, samples):
batch = {
k: [
torch.tensor(samples[i][k], dtype=torch.int64)
for i in range(len(samples))
]
for k in ["input_ids", "token_types", "attention_mask", "labels"]
}
# print(batch)
for k, v in batch.items():
if k != "labels":
batch[k] = pad_sequence(
v, batch_first=True, padding_value=self.pad_token_id
)
else:
batch[k] = pad_sequence(v, batch_first=True, padding_value=-100)
batch["decoder_input_ids"] = torch.tensor(
self.shift_tokens_right(
batch["labels"], self.pad_token_id, self.decoder_start_token_id
),
dtype=torch.long,
)
return batch
def shift_tokens_right(
self, input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
) -> np.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(
shifted_input_ids == -100, pad_token_id, shifted_input_ids
)
return shifted_input_ids
def train_dataloader(self):
from fengshen.data.universal_datamodule.universal_sampler import (
PretrainingRandomSampler,
)
from fengshen.data.universal_datamodule.universal_datamodule import (
get_consume_samples,
)
# use a custom sampler so that resumed training picks up the data at the correct position
consumed_samples = get_consume_samples(self)
batch_sampler = PretrainingRandomSampler(
epoch=self.epochs,
total_samples=len(self.train_dataset),
consumed_samples=consumed_samples,
micro_batch_size=self.hparams.train_batchsize,
data_parallel_rank=self.trainer.global_rank, # gpu idx
data_parallel_size=self.trainer.world_size, # gpu num
)
return DataLoader(
self.train_dataset,
batch_sampler=batch_sampler,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers,
collate_fn=self.collate_fn,
)
def val_dataloader(self):
sampler = torch.utils.data.distributed.DistributedSampler(
self.test_dataset, shuffle=False
)
return DataLoader(
self.test_dataset,
sampler=sampler,
shuffle=False,
batch_size=self.hparams.valid_batchsize,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers,
collate_fn=self.collate_fn,
)
def predict_dataloader(self):
sampler = torch.utils.data.distributed.DistributedSampler(
self.test_dataset, shuffle=False
)
return DataLoader(
self.test_dataset,
sampler=sampler,
shuffle=False,
batch_size=self.hparams.valid_batchsize,
pin_memory=True,
num_workers=self.hparams.dataloader_num_workers,
collate_fn=self.collate_fn,
)
if __name__ == "__main__":
# test
import argparse
total_parser = argparse.ArgumentParser("DATASET parser")
total_parser.add_argument(
"--tokenizer_type",
default="t5_tokenizer",
choices=["bert_tokenizer", "t5_tokenizer"],
)
total_parser.add_argument("--preprocessing_num_workers", default="10", type=int)
total_parser.add_argument(
"--new_vocab_path",
default="/cognitive_comp/hejunqing/projects/Dialog_pretrain/randeng_t5_newvocab_784M",
type=str,
)
total_parser.add_argument("--train_split_size", default=0.995, type=int)
total_parser.add_argument(
"--pretrained_model_path",
default="/cognitive_comp/hejunqing/projects/Dialog_pretrain/randeng_t5_newvocab_784M",
)
total_parser = DialogDataModel.add_data_specific_args(total_parser)
args = total_parser.parse_args()
dl = DialogDataModel(args)
for i in range(5):
for batch in dl.train_dataloader():
print(batch)
print(batch["input_ids"])
print(batch["token_types"])
print(batch["decoder_input_ids"])
print(batch["labels"])
print("test finish")
| 13,701 | 33.954082 | 101 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/taiyi_stable_diffusion_datasets/taiyi_datasets.py
|
from torch.utils.data import Dataset, ConcatDataset
import os
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
def add_data_args(parent_args):
parser = parent_args.add_argument_group('taiyi stable diffusion data args')
# multiple paths can be passed in; each is loaded separately
parser.add_argument(
"--datasets_path", type=str, default=None, required=True, nargs='+',
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--datasets_type", type=str, default=None, required=True, choices=['txt', 'csv', 'fs_datasets'], nargs='+',
help="dataset type, txt or csv, same len as datasets_path",
)
parser.add_argument(
"--resolution", type=int, default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", default=False,
help="Whether to center crop images before resizing to resolution"
)
parser.add_argument("--thres", type=float, default=0.2)
return parent_args
class TXTDataset(Dataset):
# Txt dataset reader, mainly for the Zero23m dataset.
def __init__(self,
foloder_name,
thres=0.2):
super().__init__()
# print(f'Loading folder data from {foloder_name}.')
self.image_paths = []
'''
This part of the data has not been open-sourced yet:
score_data = pd.read_csv(os.path.join(foloder_name, 'score.csv'))
img_path2score = {score_data['image_path'][i]: score_data['score'][i]
for i in range(len(score_data))}
'''
# print(img_path2score)
# only file paths are stored here, to keep initialization time short.
for each_file in os.listdir(foloder_name):
if each_file.endswith('.jpg'):
self.image_paths.append(os.path.join(foloder_name, each_file))
# print('Done loading data. Len of images:', len(self.image_paths))
def __len__(self):
return len(self.image_paths)
def __getitem__(self, idx):
img_path = str(self.image_paths[idx])
caption_path = img_path.replace('.jpg', '.txt') # image and caption files share the same base name
with open(caption_path, 'r') as f:
caption = f.read()
return {'img_path': img_path, 'caption': caption}
# NOTE to speed up data loading, the original reader is kept and reads are parallelized externally (30 min -> 3 min)
class CSVDataset(Dataset):
def __init__(self,
input_filename,
image_root,
img_key,
caption_key,
thres=0.2):
super().__init__()
# logging.debug(f'Loading csv data from {input_filename}.')
print(f'Loading csv data from {input_filename}.')
self.images = []
self.captions = []
if input_filename.endswith('.csv'):
# print(f"Load Data from{input_filename}")
df = pd.read_csv(input_filename, index_col=0, on_bad_lines='skip')
print(f'file {input_filename} datalen {len(df)}')
# the image path may also need minor adjustments depending on the dataset layout
self.images.extend(df[img_key].tolist())
self.captions.extend(df[caption_key].tolist())
self.image_root = image_root
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img_path = os.path.join(self.image_root, str(self.images[idx]))
return {'img_path': img_path, 'caption': self.captions[idx]}
def if_final_dir(path: str) -> bool:
# a directory that directly contains at least one file is treated as a leaf directory
for f in os.scandir(path):
if f.is_file():
return True
return False
def process_pool_read_txt_dataset(args,
input_root=None,
thres=0.2):
p = ProcessPoolExecutor(max_workers=20)
all_datasets = []
res = []
# recursively traverse all subdirectories under this path
def traversal_files(path: str):
list_subfolders_with_paths = [f.path for f in os.scandir(path) if f.is_dir()]
for dir_path in list_subfolders_with_paths:
if if_final_dir(dir_path):
res.append(p.submit(TXTDataset,
dir_path,
thres))
else:
traversal_files(dir_path)
traversal_files(input_root)
p.shutdown()
for future in res:
all_datasets.append(future.result())
dataset = ConcatDataset(all_datasets)
return dataset
def process_pool_read_csv_dataset(args,
input_root,
thres=0.20):
# here input_root is a directory whose 'release' subfolder contains the CSV files
all_csvs = os.listdir(os.path.join(input_root, 'release'))
image_root = os.path.join(input_root, 'images')
# csv_with_score = [each for each in all_csvs if 'score' in each]
all_datasets = []
res = []
p = ProcessPoolExecutor(max_workers=150)
for path in all_csvs:
each_csv_path = os.path.join(input_root, 'release', path)
res.append(p.submit(CSVDataset,
each_csv_path,
image_root,
img_key="name",
caption_key="caption",
thres=thres))
p.shutdown()
for future in res:
all_datasets.append(future.result())
dataset = ConcatDataset(all_datasets)
return dataset
def load_data(args, global_rank=0):
assert len(args.datasets_path) == len(args.datasets_type), \
"datasets_path num not equal to datasets_type"
all_datasets = []
for path, type in zip(args.datasets_path, args.datasets_type):
if type == 'txt':
all_datasets.append(process_pool_read_txt_dataset(
args, input_root=path, thres=args.thres))
elif type == 'csv':
all_datasets.append(process_pool_read_csv_dataset(
args, input_root=path, thres=args.thres))
elif type == 'fs_datasets':
from fengshen.data.fs_datasets import load_dataset
all_datasets.append(load_dataset(path, num_proc=args.num_workers,
thres=args.thres, global_rank=global_rank)['train'])
else:
            raise ValueError('unsupported dataset type: %s' % type)
        print(f'load dataset {type} {path} len {len(all_datasets[-1])}')
return {'train': ConcatDataset(all_datasets)}
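# A minimal usage sketch of load_data (paths and values are hypothetical;
# `args` is assumed to carry the fields referenced above):
#
#   from argparse import Namespace
#   args = Namespace(datasets_path=['/data/zero23m'],
#                    datasets_type=['txt'],
#                    thres=0.2,
#                    num_workers=8)
#   datasets = load_data(args)      # -> {'train': ConcatDataset([...])}
#   print(len(datasets['train']))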
| 6,417 | 35.885057 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/task_dataloader/medicalQADataset.py
|
# coding=utf8
import os
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import AutoTokenizer
class GPT2QADataset(Dataset):
'''
    Dataset used for the Yuyuan medical QA task.
    Only supports small datasets; loading large datasets may be slow.
    For large datasets, please use mmap datasets (work in progress).
'''
def __init__(self, data_path, name, args):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path)
if self.tokenizer.pad_token is None:
self.tokenizer.add_special_tokens({'pad_token': '<|endoftext|>'})
self.data_size = os.path.getsize(data_path)/1024/1024/1024
self.data_type_name = name
self.data = self.load_data(data_path)
self.max_seq_length = args.max_seq_length
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.encode(self.data[index])
def load_data(self, data_path):
        # show a progress bar while loading
if self.data_size <= 5:
with open(data_path, "rt", encoding='utf8') as f:
lines = f.readlines()
total_num = len(lines)
data_gen = lines
else:
data_gen = open(data_path, "rt", encoding='utf8')
total_num = None
data = []
with tqdm(total=total_num, desc=f'{self.data_type_name}处理进度', mininterval=0.3) as bar:
for idx, line in enumerate(data_gen):
data.append(self.data_parse(line))
bar.update()
if self.data_size > 5:
data_gen.close()
return data
def data_parse(self, line):
"""
        Parse data lines in different formats.
"""
dic = eval(line.strip())
return dic
def encode(self, item):
"""
        Convert a raw sample into model inputs for training.
"""
inputs_dict = self.tokenizer.encode_plus(item['Question']+item['answer'],
max_length=self.max_seq_length, padding='max_length',
truncation=True, return_tensors='pt')
target = inputs_dict['input_ids']
labels = target.clone().detach()
labels[target == self.tokenizer.pad_token_id] = -100
return {
"input_ids": inputs_dict['input_ids'].squeeze(),
"attention_mask": inputs_dict['attention_mask'].squeeze(),
"labels": labels.squeeze(),
"question": item['Question'],
"answer": item['answer']
}
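# Sketch of the expected raw data format (content is hypothetical): each line
# of the txt file is a Python dict literal that data_parse() reads via eval,
# e.g.
#
#   {'Question': 'What should I do about a cold?', 'answer': 'Rest and drink water.'}
#
# encode() concatenates Question + answer, pads/truncates to max_seq_length,
# and sets labels at pad positions to -100.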
class GPT2QADataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('GPT2QADataModel')
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--train_data', default='train.txt', type=str)
parser.add_argument('--valid_data', default='valid.txt', type=str)
parser.add_argument('--test_data', default='test.txt', type=str)
parser.add_argument('--train_batchsize', type=int, required=True)
parser.add_argument('--valid_batchsize', type=int, required=True)
parser.add_argument('--max_seq_length', default=1024, type=int)
return parent_args
def __init__(self, args):
super().__init__()
self.args = args
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
if not args.do_eval_only:
self.train_data = GPT2QADataset(os.path.join(
args.data_dir, args.train_data), '训练集', args)
self.valid_data = GPT2QADataset(os.path.join(
args.data_dir, args.valid_data), '验证集', args)
self.test_data = GPT2QADataset(os.path.join(
args.data_dir, args.test_data), '测试集', args)
def train_dataloader(self):
return DataLoader(
self.train_data, shuffle=True,
batch_size=self.train_batchsize,
pin_memory=False, num_workers=self.args.num_workers)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False,
batch_size=self.valid_batchsize,
pin_memory=False, num_workers=self.args.num_workers)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False,
batch_size=self.valid_batchsize, pin_memory=False,
num_workers=self.args.num_workers)
if __name__ == '__main__':
import argparse
modelfile = '/cognitive_comp/wuziwei/pretrained_model_hf/medical_v2'
datafile = '/cognitive_comp/wuziwei/task-data/medical_qa/medical_qa_train.txt'
parser = argparse.ArgumentParser(description='hf test', allow_abbrev=False)
group = parser.add_argument_group(title='test args')
group.add_argument('--pretrained-model-path', type=str, default=modelfile,
                       help='Path to the pretrained language model.')
group.add_argument('--max-seq-length', type=int, default=1024)
args = parser.parse_args()
testml = GPT2QADataset(datafile, 'medical_qa', args=args)
print(testml[10])
| 5,285 | 37.304348 | 102 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/task_dataloader/task_datasets.py
|
# coding=utf8
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
import json
import torch
import pytorch_lightning as pl
import os
class AbstractCollator:
"""
    Collator for the summarization task.
"""
def __init__(self, tokenizer, max_enc_length, max_dec_length, prompt):
self.tokenizer = tokenizer
self.max_enc_length = max_enc_length
self.max_dec_length = max_dec_length
self.prompt = prompt
def __call__(self, samples):
labels = []
attn_mask = []
# decoder_attn_mask = []
source_inputs = []
for sample in samples:
encode_dict = self.tokenizer.encode_plus(
self.prompt + sample['text'],
max_length=self.max_enc_length,
padding='max_length',
truncation=True,
return_tensors='pt')
decode_dict = self.tokenizer.encode_plus(
sample['summary'],
max_length=self.max_dec_length,
padding='max_length',
truncation=True,
return_tensors='pt')
source_inputs.append(encode_dict['input_ids'].squeeze())
labels.append(decode_dict['input_ids'].squeeze())
attn_mask.append(encode_dict['attention_mask'].squeeze())
# decoder_attn_mask.append(decode_dict['attention_mask'].squeeze())
# labels = torch.tensor(decode_dict['input'])
source_inputs = torch.stack(source_inputs)
labels = torch.stack(labels)
attn_mask = torch.stack(attn_mask)
# decoder_attn_mask = torch.stack(decoder_attn_mask)
# decode_input_idxs = shift_tokens_right(labels, self.tokenizer.pad_token_id, self.tokenizer.pad_token_id)
end_token_index = torch.where(labels == self.tokenizer.eos_token_id)[1]
for idx, end_idx in enumerate(end_token_index):
labels[idx][end_idx + 1:] = -100
return {
"input_ids": source_inputs,
"attention_mask": attn_mask,
"labels": labels,
"text": [sample['text'] for sample in samples],
"summary": [sample['summary'] for sample in samples]
}
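# A minimal usage sketch (tokenizer and dataset are assumed to exist; the
# values mirror the defaults of LCSTSDataModel below and are illustrative):
#
#   collator = AbstractCollator(tokenizer, max_enc_length=128,
#                               max_dec_length=30, prompt='summarize:')
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collator)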
class LCSTSDataset(Dataset):
'''
    Dataset used for the LCSTS summarization task.
'''
def __init__(self, data_path, args):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path, use_fast=False)
self.data = self.load_data(data_path)
self.prompt = args.prompt
self.max_enc_length = args.max_enc_length
self.max_dec_length = args.max_dec_length
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.encode(self.data[index])
def load_data(self, data_path):
with open(data_path, "r", encoding='utf8') as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
obj = json.loads(line)
source = obj['text']
target = obj['summary']
samples.append({
"text": source,
"summary": target
})
return samples
def cal_data(self, data_path):
with open(data_path, "r", encoding='utf8') as f:
lines = f.readlines()
samples = []
enc_sizes = []
dec_sizes = []
for line in tqdm(lines):
obj = json.loads(line.strip())
source = obj['text']
target = obj['summary']
enc_input_ids = self.tokenizer.encode(source)
target = self.tokenizer.encode(target)
enc_sizes.append(len(enc_input_ids))
dec_sizes.append(len(target)-1)
samples.append({
"enc_input_ids": enc_input_ids,
"dec_input_ids": target[:-1],
"label_ids": target[1:]
})
max_enc_len = max(enc_sizes)
max_dec_len = max(dec_sizes)
import numpy as np
# mean of len(enc_input_ids): 74.68041911345998
# mean of len(dec_input_ids): 14.02265483791283
# max of len(enc_input_ids): 132
# max of len(dec_input_ids): 31
print('mean of len(enc_input_ids):', np.mean(enc_sizes),
'mean of len(dec_input_ids):', np.mean(dec_sizes),
'max of len(enc_input_ids):', max_enc_len,
'max of len(dec_input_ids):', max_dec_len)
return samples
def encode(self, item):
encode_dict = self.tokenizer.encode_plus(
self.prompt + item['text'],
max_length=self.max_enc_length,
padding='max_length',
truncation=True,
return_tensors='pt')
decode_dict = self.tokenizer.encode_plus(
item['summary'],
max_length=self.max_dec_length,
padding='max_length',
truncation=True)
target = decode_dict['input_ids']
# print('encode_dict shape:', encode_dict['input_ids'].shape)
labels = torch.tensor(target)
        # `target` is a plain Python list here, so mask pad positions on the tensor
        labels[labels == self.tokenizer.pad_token_id] = -100
return {
"input_ids": encode_dict['input_ids'].squeeze(),
"attention_mask": encode_dict['attention_mask'].squeeze(),
"labels": labels.squeeze(),
"text": item['text'],
"summary": item['summary']
}
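# Sketch of the jsonl line format consumed by load_data() (content is
# hypothetical):
#
#   {"text": "source article ...", "summary": "reference summary ..."}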
class LCSTSDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('LCSTSDataModel')
parser.add_argument(
'--data_dir', default='/cognitive_comp/ganruyi/data_datasets_LCSTS_LCSTS/', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.jsonl', type=str)
parser.add_argument('--valid_data', default='valid.jsonl', type=str)
parser.add_argument('--test_data', default='test_public.jsonl', type=str)
parser.add_argument('--train_batchsize', default=128, type=int)
parser.add_argument('--valid_batchsize', default=128, type=int)
parser.add_argument('--max_enc_length', default=128, type=int)
parser.add_argument('--max_dec_length', default=30, type=int)
parser.add_argument('--prompt', default='summarize:', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.args = args
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
if not args.do_eval_only:
self.train_data = LCSTSDataset(os.path.join(
args.data_dir, args.train_data), args)
self.valid_data = LCSTSDataset(os.path.join(
args.data_dir, args.valid_data), args)
self.test_data = LCSTSDataset(os.path.join(
args.data_dir, args.test_data), args)
def train_dataloader(self):
return DataLoader(self.train_data,
shuffle=True,
batch_size=self.train_batchsize,
pin_memory=False,
num_workers=self.args.num_workers)
def val_dataloader(self):
return DataLoader(self.valid_data,
shuffle=False,
batch_size=self.valid_batchsize,
pin_memory=False,
num_workers=self.args.num_workers)
def predict_dataloader(self):
return DataLoader(self.test_data,
shuffle=False,
batch_size=self.valid_batchsize,
pin_memory=False,
num_workers=self.args.num_workers)
| 7,832 | 36.84058 | 114 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/task_dataloader/__init__.py
|
# coding=utf-8
from .task_datasets import LCSTSDataModel, LCSTSDataset
__all__ = ['LCSTSDataModel', 'LCSTSDataset']
| 116 | 28.25 | 55 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/hubert/hubert_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('Hubert Dataset')
parser.add_argument('--data', type=str)
parser.add_argument('--sample_rate', type=float, default=16000)
parser.add_argument('--label_dir', type=str)
parser.add_argument('--labels', type=str, nargs='+')
parser.add_argument('--label_rate', type=float)
parser.add_argument('--max_keep_size', type=int, default=None)
parser.add_argument('--min_sample_size', type=int)
parser.add_argument('--max_sample_size', type=int)
parser.add_argument('--pad_audio', type=bool)
parser.add_argument('--normalize', type=bool)
parser.add_argument('--random_crop', type=bool)
parser.add_argument('--single_target', type=bool, default=False)
return parent_args
def load_audio(manifest_path, max_keep, min_keep):
n_long, n_short = 0, 0
names, inds, sizes = [], [], []
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
names.append(items[0])
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes
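# Sketch of the expected manifest layout (paths are hypothetical): the first
# line is the audio root, every following line is "<relative_path>\t<num_samples>":
#
#   /data/audio_root
#   spk1/utt_0001.wav\t123840
#   spk1/utt_0002.wav\t98560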
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
)
class HubertDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_rates: Union[List[float], float], # -1 for sequence labels
pad_list: List[str],
eos_list: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
max_sample_size: Optional[int] = None,
shuffle: bool = True,
pad_audio: bool = False,
normalize: bool = False,
store_labels: bool = True,
random_crop: bool = False,
single_target: bool = False,
):
self.audio_root, self.audio_names, inds, tot, self.sizes = load_audio(
manifest_path, max_keep_sample_size, min_keep_sample_size
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.random_crop = random_crop
self.num_labels = len(label_paths)
self.pad_list = pad_list
self.eos_list = eos_list
self.label_processors = label_processors
self.single_target = single_target
self.label_rates = (
[label_rates for _ in range(len(label_paths))]
if isinstance(label_rates, float)
else label_rates
)
self.store_labels = store_labels
if store_labels:
self.label_list = [load_label(p, inds, tot) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot) for p in label_paths
]
assert label_processors is None or len(label_processors) == self.num_labels
for label_path, label_rate in zip(label_paths, self.label_rates):
verify_label_lengths(
self.sizes, sample_rate, label_path, label_rate, inds, tot
)
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.pad_audio = pad_audio
self.normalize = normalize
logger.info(
f"pad_audio={pad_audio}, random_crop={random_crop}, "
f"normalize={normalize}, max_sample_size={self.max_sample_size}"
)
def get_audio(self, index):
import soundfile as sf
wav_path = os.path.join(self.audio_root, self.audio_names[index])
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
wav = self.postprocess(wav, cur_sample_rate)
return wav
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def __getitem__(self, index):
wav = self.get_audio(index)
labels = self.get_labels(index)
return {"id": index, "source": wav, "label_list": labels}
def __len__(self):
return len(self.sizes)
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav, 0
start, end = 0, target_size
if self.random_crop:
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end], start
def collater(self, samples):
# target = max(sizes) -> random_crop not used
# target = max_sample_size -> random_crop used for long
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
audios = [s["source"] for s in samples]
audio_sizes = [len(s) for s in audios]
if self.pad_audio:
audio_size = min(max(audio_sizes), self.max_sample_size)
else:
audio_size = min(min(audio_sizes), self.max_sample_size)
collated_audios, padding_mask, audio_starts = self.collater_audio(
audios, audio_size
)
targets_by_label = [
[s["label_list"][i] for s in samples] for i in range(self.num_labels)
]
targets_list, lengths_list, ntokens_list = self.collater_label(
targets_by_label, audio_size, audio_starts
)
net_input = {"source": collated_audios, "padding_mask": padding_mask}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
}
if self.single_target:
batch["target_lengths"] = lengths_list[0]
batch["ntokens"] = ntokens_list[0]
batch["target"] = targets_list[0]
else:
batch["target_lengths_list"] = lengths_list
batch["ntokens_list"] = ntokens_list
batch["target_list"] = targets_list
return batch
def collater_audio(self, audios, audio_size):
collated_audios = audios[0].new_zeros(len(audios), audio_size)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
# if self.pad_audio else None
)
audio_starts = [0 for _ in audios]
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
assert self.pad_audio
collated_audios[i] = torch.cat([audio, audio.new_full((-diff,), 0.0)])
padding_mask[i, diff:] = True
else:
collated_audios[i], audio_starts[i] = self.crop_to_max_size(
audio, audio_size
)
return collated_audios, padding_mask, audio_starts
def collater_frm_label(self, targets, audio_size, audio_starts, label_rate, pad):
assert label_rate > 0
s2f = label_rate / self.sample_rate
frm_starts = [int(round(s * s2f)) for s in audio_starts]
frm_size = int(round(audio_size * s2f))
if not self.pad_audio:
rem_size = [len(t) - s for t, s in zip(targets, frm_starts)]
frm_size = min(frm_size, *rem_size)
targets = [t[s: s + frm_size] for t, s in zip(targets, frm_starts)]
logger.debug(f"audio_starts={audio_starts}")
logger.debug(f"frame_starts={frm_starts}")
logger.debug(f"frame_size={frm_size}")
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_label(self, targets_by_label, audio_size, audio_starts):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, self.label_rates, self.pad_list)
for targets, label_rate, pad in itr:
if label_rate == -1.0:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
else:
targets, lengths, ntokens = self.collater_frm_label(
targets, audio_size, audio_starts, label_rate, pad
)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
if self.pad_audio:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
def postprocess(self, wav, cur_sample_rate):
if wav.dim() == 2:
wav = wav.mean(-1)
assert wav.dim() == 1, wav.dim()
if cur_sample_rate != self.sample_rate:
raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
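# A minimal construction sketch (paths, pad/eos indices and label settings are
# hypothetical, not a prescribed configuration):
#
#   dataset = HubertDataset(
#       manifest_path='/data/train.tsv',
#       sample_rate=16000,
#       label_paths=['/data/train.km'],
#       label_rates=50.0,                  # label frames per second
#       pad_list=[pad_index], eos_list=[eos_index],
#       max_sample_size=250000, pad_audio=False, random_crop=True,
#   )
#   batch = dataset.collater([dataset[i] for i in range(4)])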
| 13,124 | 35.256906 | 86 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/clip_dataloader/flickr.py
|
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
from transformers import BertTokenizer
import pytorch_lightning as pl
from PIL import Image
import os
class flickr30k_CNA(Dataset):
def __init__(self, img_root_path,
annot_path,
transform=None):
self.images = []
self.captions = []
self.labels = []
self.root = img_root_path
with open(annot_path, 'r') as f:
for line in f:
line = line.strip().split('\t')
key, caption = line[0].split('#')[0], line[1]
img_path = key + '.jpg'
self.images.append(img_path)
self.captions.append(caption)
self.labels.append(key)
self.transforms = transform
self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
        # NOTE for the large model
self.context_length = 77
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img_path = str(self.images[idx])
image = self.transforms(Image.open(os.path.join(self.root, img_path)))
text = self.tokenizer(str(self.captions[idx]), max_length=self.context_length,
padding='max_length', truncation=True, return_tensors='pt')['input_ids'][0]
label = self.labels[idx]
return image, text, label
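# Sketch of the expected annotation format (content is hypothetical): one
# caption per line, "<image_key>#<caption_index>\t<caption>", where the image
# file is "<image_key>.jpg" under img_root_path, e.g.
#
#   1000092795#0\t<Chinese caption text>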
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)
):
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
return Compose([
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
_convert_to_rgb,
ToTensor(),
normalize,
])
class FlickrDataModule(pl.LightningDataModule):
def __init__(self, args):
self.batch_size = args.batch_size
        self.train_filename = args.train_filename  # NOTE annotation file
        self.train_root = args.train_root  # NOTE image root directory
self.val_filename = args.val_filename
self.val_root = args.val_root
self.test_filename = args.test_filename
self.test_root = args.test_root
self.pretrain_model = args.pretrain_model
self.image_size = 224
self.prepare_data_per_node = True
self._log_hyperparams = False
self.num_workers = args.num_workers
def setup(self, stage=None):
# dataset
train_transform = image_transform(224, True)
val_transform = image_transform(224, False)
test_transform = image_transform(224, False)
self.train_dataset = flickr30k_CNA(self.train_root, self.train_filename, transform=train_transform)
self.val_dataset = flickr30k_CNA(self.val_root, self.val_filename, transform=val_transform)
self.test_dataset = flickr30k_CNA(self.test_root, self.test_filename, transform=test_transform)
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers)
| 3,812 | 34.971698 | 112 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/bart_dataset.py
|
"""BART Style dataset. Modified from fairseq."""
import numpy as np
import torch
import math
import re
from fengshen.data.megatron_dataloader.dataset_utils import (
get_samples_mapping
)
class BartDataset(torch.utils.data.Dataset):
def __init__(self, name, indexed_dataset, data_prefix,
num_epochs, max_num_samples, masked_lm_prob,
max_seq_length, short_seq_prob, seed, tokenizer, zh_tokenizer):
# Params to store.
self.name = name
self.seed = seed
self.masked_lm_prob = masked_lm_prob
self.max_seq_length = max_seq_length
# Dataset.
self.indexed_dataset = indexed_dataset
# Build the samples mapping.
self.samples_mapping = get_samples_mapping(self.indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
self.max_seq_length - 3, # account for added tokens
short_seq_prob,
self.seed,
self.name,
False)
# Vocab stuff.
self.vocab_size = tokenizer.vocab_size
inv_vocab = {v: k for k, v in tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
self.cls_id = tokenizer.cls_token_id
self.sep_id = tokenizer.sep_token_id
self.mask_id = tokenizer.mask_token_id
self.pad_id = tokenizer.pad_token_id
self.tokenizer = tokenizer
seg_tokens = ['。', ';', ';', '!', '!', '?', '?']
seg_token_ids = []
for t in seg_tokens:
if t in tokenizer.vocab:
seg_token_ids.append(tokenizer.vocab[t])
else:
print('seg_token "{}" not in vocab'.format(t))
self.seg_token_ids = set(seg_token_ids)
self.zh_tokenizer = zh_tokenizer
# Denoising ratios
self.permute_sentence_ratio = 1.0
self.mask_ratio = masked_lm_prob # 0.15
self.random_ratio = 0.1
self.insert_ratio = 0.0
self.rotate_ratio = 0.0
self.mask_whole_word = 1
self.item_transform_func = None
self.mask_span_distribution = None
if False:
_lambda = 3 # Poisson lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
self.mask_span_distribution = torch.distributions.Categorical(ps)
def __len__(self):
return self.samples_mapping.shape[0]
def __getitem__(self, idx):
start_idx, end_idx, seq_length = self.samples_mapping[idx]
sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32))
return self.build_training_sample(sample, self.max_seq_length, np_rng)
def build_training_sample(self, sample, max_seq_length, np_rng):
"""Biuld training sample.
Arguments:
sample: A list of sentences in which each sentence is a list token ids.
max_seq_length: Desired sequence length.
np_rng: Random number genenrator. Note that this rng state should be
numpy and not python since python randint is inclusive for
the opper bound whereas the numpy one is exclusive.
"""
# permute sentences
full_stops = []
tokens = [self.cls_id]
for sent in sample:
for t in sent:
token = self.vocab_id_to_token_dict[t]
if len(re.findall('##[\u4E00-\u9FA5]', token)) > 0:
                    # compatible with erlangshen's "##" style for whole word masking
t = self.tokenizer.convert_tokens_to_ids(token[2:])
tokens.append(t)
if t in self.seg_token_ids:
tokens.append(self.sep_id)
if tokens[-1] != self.sep_id:
tokens.append(self.sep_id)
if len(tokens) > max_seq_length:
tokens = tokens[:max_seq_length]
tokens[-1] = self.sep_id
tokens = torch.LongTensor(tokens)
full_stops = (tokens == self.sep_id).long()
assert (max_seq_length - tokens.shape[0]) >= 0, (tokens.size(), tokens[-1], max_seq_length)
source, target = tokens, tokens[1:].clone()
use_decoder = 1
# if torch.rand(1).item() < 0.5:
# use_decoder = 0
if self.permute_sentence_ratio > 0.0 and use_decoder == 1:
source = self.permute_sentences(source, full_stops, self.permute_sentence_ratio)
if self.mask_ratio > 0.0:
replace_length = 1 if use_decoder else -1
mask_ratio = self.mask_ratio * 2 if use_decoder else self.mask_ratio
source = self.add_whole_word_mask(source, mask_ratio, replace_length)
if self.insert_ratio > 0.0:
raise NotImplementedError
source = self.add_insertion_noise(source, self.insert_ratio)
if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:
raise NotImplementedError
source = self.add_rolling_noise(source)
        # there can be additional changes to make:
if self.item_transform_func is not None:
source, target = self.item_transform_func(source, target)
assert (source >= 0).all()
# assert (source[1:-1] >= 1).all()
assert (source <= self.vocab_size).all()
assert source[0] == self.cls_id
assert source[-1] == self.sep_id
# tokenizer = get_tokenizer()
# print(' '.join(tokenizer.tokenizer.convert_ids_to_tokens(source)))
# print(tokenizer.detokenize(target))
# print(tokenizer.detokenize(source))
# print()
prev_output_tokens = torch.zeros_like(target)
prev_output_tokens[0] = self.sep_id # match the preprocessing in fairseq
prev_output_tokens[1:] = target[:-1]
# src_padding_length = max_seq_length - source.shape[0]
# tgt_padding_length = max_seq_length - target.shape[0]
# assert src_padding_length >= 0, (source.size(), source[-1], max_seq_length)
# assert tgt_padding_length >= 0, (target.size(), target[-1], max_seq_length)
source_ = torch.full((max_seq_length,), self.pad_id, dtype=torch.long)
source_[:source.shape[0]] = source
target_ = torch.full((max_seq_length,), -100, dtype=torch.long)
        # the decoder does not need bos at the front
target_[:target.shape[0]] = target
prev_output_tokens_ = torch.full((max_seq_length,), self.pad_id, dtype=torch.long)
prev_output_tokens_[:prev_output_tokens.shape[0]] = prev_output_tokens
return {
"input_ids": source_,
"labels": target_,
# "decoder_input_ids": prev_output_tokens_,
"attention_mask": (source_ != self.pad_id).long()
}
def permute_sentences(self, source, full_stops, p=1.0):
# Tokens that are full stops, where the previous token is not
sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2
result = source.clone()
num_sentences = sentence_ends.size(0)
num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
substitutions = torch.randperm(num_sentences)[:num_to_permute]
ordering = torch.arange(0, num_sentences)
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
# Ignore <bos> at start
index = 1
for i in ordering:
sentence = source[(sentence_ends[i - 1] if i > 0 else 1): sentence_ends[i]]
result[index: index + sentence.size(0)] = sentence
index += sentence.size(0)
return result
def word_starts_en(self, source):
if self.mask_whole_word is not None:
is_word_start = self.mask_whole_word.gather(0, source)
else:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
def word_starts(self, source):
if self.mask_whole_word is None:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
raw_tokens = [self.vocab_id_to_token_dict[i] for i in source.tolist()]
words = [raw_tokens[0]] + \
self.zh_tokenizer(''.join(raw_tokens[1:-1]), HMM=True) + [raw_tokens[-1]]
def _is_chinese_char(c):
"""Checks whether CP is the #codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
            # like all of the other languages.
if len(c) > 1:
return all([_is_chinese_char(c_i) for c_i in c])
cp = ord(c)
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def align_linear(atokens, btokens):
a2c = []
c2b = []
a2b = []
length = 0
for tok in atokens:
a2c.append([length + i for i in range(len(tok))])
length += len(tok)
for i, tok in enumerate(btokens):
c2b.extend([i for _ in range(len(tok))])
for i, amap in enumerate(a2c):
bmap = [c2b[ci] for ci in amap]
a2b.append(list(set(bmap)))
return a2b
raw_to_word_align = align_linear(raw_tokens, words)
is_word_start = torch.zeros(source.size())
word_starts = []
skip_cur_word = True
for i in range(1, len(raw_to_word_align)):
if raw_to_word_align[i-1] == raw_to_word_align[i]:
# not a word start, as they align to the same word
if not skip_cur_word and not _is_chinese_char(raw_tokens[i]):
word_starts.pop(-1)
skip_cur_word = True
continue
else:
is_word_start[i] = 1
if _is_chinese_char(raw_tokens[i]):
word_starts.append(i)
skip_cur_word = False
is_word_start[0] = 0
is_word_start[-1] = 0
word_starts = torch.tensor(word_starts).long().view(-1, 1)
return is_word_start, word_starts
def add_whole_word_mask(self, source, p, replace_length=1):
is_word_start, word_starts = self.word_starts(source)
num_to_mask_word = int(math.ceil(word_starts.size(0) * p))
num_to_mask_char = int(math.ceil(word_starts.size(0) * p * 0.1))
num_to_mask = num_to_mask_word + num_to_mask_char
if num_to_mask > word_starts.size(0):
word_starts = is_word_start.nonzero(as_tuple=False)
num_inserts = 0
if num_to_mask == 0:
return source
if self.mask_span_distribution is not None:
lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
# Make sure we have enough to mask
cum_length = torch.cumsum(lengths, 0)
while cum_length[-1] < num_to_mask:
lengths = torch.cat(
[
lengths,
self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),
],
dim=0,
)
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
return self.add_insertion_noise(source, num_inserts / source.size(0))
assert (lengths > 0).all()
else:
lengths = torch.ones((num_to_mask,)).long()
assert is_word_start[-1] == 0
indices = word_starts[
torch.randperm(word_starts.size(0))[:num_to_mask]
].squeeze(1)
mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
source_length = source.size(0)
assert source_length - 1 not in indices
to_keep = torch.ones(source_length, dtype=torch.bool)
is_word_start[
-1
] = 255 # acts as a long length, so spans don't go over the end of doc
if replace_length == 0:
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
# print(source.size(), word_starts.size(), indices.size(), mask_random.size())
source[indices] = self.mask_id
source[indices[mask_random]] = torch.randint(
1, self.vocab_size, size=(mask_random.sum(),)
)
# sorted_indices = torch.sort(indices)[0]
# continue_mask_pos = ((sorted_indices + 1)[:-1] == sorted_indices[1:])
# continue_mask_indices = sorted_indices[1:][continue_mask_pos]
# to_keep[continue_mask_indices] = 0
# for char indices, we already masked, the following loop handles word mask
indices = indices[:num_to_mask_word]
mask_random = mask_random[:num_to_mask_word]
if self.mask_span_distribution is not None:
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= is_word_start[indices + 1].long()
uncompleted = lengths >= 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
if replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_id
source[indices[mask_random]] = torch.randint(
1, self.vocab_size, size=(mask_random.sum(),)
)
else:
# A bit faster when all lengths are 1
while indices.size(0) > 0:
uncompleted = is_word_start[indices + 1] == 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
if replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_id
source[indices[mask_random]] = torch.randint(
1, self.vocab_size, size=(mask_random.sum(),)
)
assert source_length - 1 not in indices
source = source[to_keep]
if num_inserts > 0:
source = self.add_insertion_noise(source, num_inserts / source.size(0))
return source
def add_permuted_noise(self, tokens, p):
num_words = len(tokens)
num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
return tokens
def add_rolling_noise(self, tokens):
offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
tokens = torch.cat(
(tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
dim=0,
)
return tokens
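    # Worked toy example of add_rolling_noise (token ids are illustrative):
    # with tokens = [CLS, a, b, c, SEP] and offset = 2 the result is
    # cat(tokens[0:1], tokens[2:-1], tokens[1:2], tokens[-1:]) = [CLS, b, c, a, SEP],
    # i.e. the interior is rotated while [CLS]/[SEP] stay in place.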
def add_insertion_noise(self, tokens, p):
if p == 0.0:
return tokens
num_tokens = len(tokens)
n = int(math.ceil(num_tokens * p))
noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
result = torch.LongTensor(n + len(tokens)).fill_(-1)
num_random = int(math.ceil(n * self.random_ratio))
result[noise_indices[num_random:]] = self.mask_id
result[noise_indices[:num_random]] = torch.randint(
low=1, high=self.vocab_size, size=(num_random,)
)
result[~noise_mask] = tokens
assert (result >= 0).all()
return result
| 18,396 | 40.434685 | 103 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/dataset_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, and NVIDIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Most of the code here has been copied from:
# https://github.com/google-research/albert/blob/master/create_pretraining_data.py
# with some modifications.
import math
import time
import collections
import numpy as np
import re
from fengshen.data.megatron_dataloader.utils import (
print_rank_0
)
from fengshen.data.megatron_dataloader.blendable_dataset import BlendableDataset
from fengshen.data.megatron_dataloader.indexed_dataset import make_dataset as make_indexed_dataset
DSET_TYPE_BERT = 'standard_bert'
DSET_TYPE_ICT = 'ict'
DSET_TYPE_T5 = 't5'
DSET_TYPE_BERT_CN_WWM = 'bert_cn_wwm'
DSET_TYPE_BART = 'bart'
DSET_TYPE_COCOLM = 'coco_lm'
DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_ICT,
DSET_TYPE_T5, DSET_TYPE_BERT_CN_WWM,
DSET_TYPE_BART, DSET_TYPE_COCOLM]
def get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples):
# The data prefix should be in the format of:
# weight-1, data-prefix-1, weight-2, data-prefix-2, ..
assert len(data_prefix) % 2 == 0
num_datasets = len(data_prefix) // 2
weights = [0] * num_datasets
prefixes = [0] * num_datasets
for i in range(num_datasets):
weights[i] = float(data_prefix[2 * i])
prefixes[i] = (data_prefix[2 * i + 1]).strip()
# Normalize weights
weight_sum = 0.0
for weight in weights:
weight_sum += weight
assert weight_sum > 0.0
weights = [weight / weight_sum for weight in weights]
    # Add 0.5% (the 1.005 factor) so in case the blending dataset does
# not uniformly distribute the number of samples, we still have
# samples left to feed to the network.
datasets_train_valid_test_num_samples = []
for weight in weights:
datasets_train_valid_test_num_samples.append(
[int(math.ceil(val * weight * 1.005))
for val in train_valid_test_num_samples])
return prefixes, weights, datasets_train_valid_test_num_samples
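# Worked example (prefixes are hypothetical): with
#   data_prefix = ['0.3', '/data/A_text_sentence', '0.7', '/data/B_text_sentence']
#   train_valid_test_num_samples = [1000, 100, 10]
# the weights normalize to [0.3, 0.7] and the per-dataset sample counts become
#   A: [302, 31, 4]   (= ceil(n * 0.3 * 1.005))
#   B: [704, 71, 8]   (= ceil(n * 0.7 * 1.005))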
def compile_helper():
"""Compile helper function ar runtime. Make sure this
is invoked on a single process."""
import os
import subprocess
path = os.path.abspath(os.path.dirname(__file__))
ret = subprocess.run(['make', '-C', path])
if ret.returncode != 0:
print("Making C++ dataset helpers module failed, exiting.")
import sys
sys.exit(1)
def get_a_and_b_segments(sample, np_rng):
"""Divide sample into a and b segments."""
# Number of sentences in the sample.
n_sentences = len(sample)
# Make sure we always have two sentences.
assert n_sentences > 1, 'make sure each sample has at least two sentences.'
# First part:
# `a_end` is how many sentences go into the `A`.
a_end = 1
if n_sentences >= 3:
        # Note that randint in numpy is exclusive.
a_end = np_rng.randint(1, n_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
# Second part:
tokens_b = []
for j in range(a_end, n_sentences):
tokens_b.extend(sample[j])
# Random next:
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
"""Truncates a pair of sequences to a maximum sequence length."""
# print(len_a, len_b, max_num_tokens)
assert len_a > 0
if len_a + len_b <= max_num_tokens:
return False
while len_a + len_b > max_num_tokens:
if len_a > len_b:
len_a -= 1
tokens = tokens_a
else:
len_b -= 1
tokens = tokens_b
if np_rng.random() < 0.5:
del tokens[0]
else:
tokens.pop()
return True
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
"""Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
tokens = []
tokentypes = []
# [CLS].
tokens.append(cls_id)
tokentypes.append(0)
# Segment A.
for token in tokens_a:
tokens.append(token)
tokentypes.append(0)
# [SEP].
tokens.append(sep_id)
tokentypes.append(0)
# Segment B.
for token in tokens_b:
tokens.append(token)
tokentypes.append(1)
if tokens_b:
# [SEP].
tokens.append(sep_id)
tokentypes.append(1)
return tokens, tokentypes
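# Worked toy example: with tokens_a = [t1, t2] and tokens_b = [t3] the merged
# sequence is [CLS, t1, t2, SEP, t3, SEP] with tokentypes [0, 0, 0, 0, 1, 1].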
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
def create_masked_lm_predictions(tokens,
vocab_id_list, vocab_id_to_token_dict,
masked_lm_prob,
cls_id, sep_id, mask_id,
max_predictions_per_seq,
np_rng,
tokenizer,
max_ngrams=3,
do_whole_word_mask=True,
favor_longer_ngram=False,
do_permutation=False,
geometric_dist=False,
masking_style="bert",
zh_tokenizer=None):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
cand_indexes = []
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
    # If no Chinese word segmenter is given, fall back to the "##" prefix rule.
if zh_tokenizer is None:
for (i, token) in enumerate(tokens):
if token == cls_id or token == sep_id:
token_boundary[i] = 1
continue
# Whole Word Masking means that if we mask all of the wordpieces
# corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (do_whole_word_mask and len(cand_indexes) >= 1 and
not is_start_piece(vocab_id_to_token_dict[token])):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(vocab_id_to_token_dict[token]):
token_boundary[i] = 1
else:
        # If a Chinese word segmenter is provided, segment the text first and
        # then decide the word boundaries.
        # Recover the raw tokens with CLS/SEP removed.
raw_tokens = []
for t in tokens:
if t != cls_id and t != sep_id:
raw_tokens.append(t)
raw_tokens = [vocab_id_to_token_dict[i] for i in raw_tokens]
        # Segment the text, then record for each starting character the length of the longest word beginning with it.
word_list = set(zh_tokenizer(''.join(raw_tokens), HMM=True))
word_length_dict = {}
for w in word_list:
if len(w) < 1:
continue
if w[0] not in word_length_dict:
word_length_dict[w[0]] = len(w)
elif word_length_dict[w[0]] < len(w):
word_length_dict[w[0]] = len(w)
i = 0
        # look up candidates against the word list
while i < len(tokens):
token_id = tokens[i]
token = vocab_id_to_token_dict[token_id]
if len(token) == 0 or token_id == cls_id or token_id == sep_id:
token_boundary[i] = 1
i += 1
continue
word_max_length = 1
if token[0] in word_length_dict:
word_max_length = word_length_dict[token[0]]
j = 0
word = ''
word_end = i+1
            # Backward-compatible with the old "##" style: if the following tokens start with "##", merge them into the current token as one word.
old_style = False
while word_end < len(tokens) and vocab_id_to_token_dict[tokens[word_end]].startswith('##'):
old_style = True
word_end += 1
if not old_style:
while j < word_max_length and i+j < len(tokens):
cur_token = tokens[i+j]
word += vocab_id_to_token_dict[cur_token]
j += 1
if word in word_list:
word_end = i+j
cand_indexes.append([p for p in range(i, word_end)])
token_boundary[i] = 1
i = word_end
output_tokens = list(tokens)
# add by ganruyi
if masking_style == 'bert-cn-wwm':
        # for Chinese tokens, strip the "##" prefix that was added previously
new_token_ids = []
for token_id in output_tokens:
token = tokenizer.convert_ids_to_tokens([token_id])[0]
if len(re.findall('##[\u4E00-\u9FA5]', token)) > 0:
token = token[2:]
new_token_id = tokenizer.convert_tokens_to_ids([token])[
0]
new_token_ids.append(new_token_id)
output_tokens = new_token_ids
masked_lm_positions = []
masked_lm_labels = []
if masked_lm_prob == 0:
return (output_tokens, masked_lm_positions,
masked_lm_labels, token_boundary)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
# Note(mingdachen):
        # By default, we set the probabilities to favor shorter ngram sequences.
pvals = 1. / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
    # Build ngram indices: for each word, record the candidate ngrams starting at it.
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
(masked_lms, masked_spans) = ([], [])
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
if masking_style == "bert":
# 80% of the time, replace with [MASK]
if np_rng.random() < 0.8:
masked_token = mask_id
else:
# 10% of the time, keep original
if np_rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
elif masking_style == 'bert-cn-wwm':
# 80% of the time, replace with [MASK]
if np_rng.random() < 0.8:
masked_token = mask_id
else:
# 10% of the time, keep original
if np_rng.random() < 0.5:
                        # for Chinese whole-word masking, strip "##" from the token
token_id = tokens[index]
token = tokenizer.convert_ids_to_tokens([token_id])[
0]
if len(re.findall('##[\u4E00-\u9FA5]', token)) > 0:
token = token[2:]
new_token_id = tokenizer.convert_tokens_to_ids([token])[
0]
masked_token = new_token_id
# 10% of the time, replace with random word
else:
masked_token = vocab_id_list[np_rng.randint(
0, len(vocab_id_list))]
elif masking_style == "t5":
masked_token = mask_id
else:
raise ValueError("invalid value of masking style")
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(
index=index, label=tokens[index]))
masked_spans.append(MaskedLmInstance(
index=index_set,
label=[tokens[index] for index in index_set]))
assert len(masked_lms) <= num_to_predict
np_rng.shuffle(ngram_indexes)
select_indexes = set()
if do_permutation:
for cand_index_set in ngram_indexes:
if len(select_indexes) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes or index in select_indexes:
continue
n = np.random.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(select_indexes) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(select_indexes) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes or index in select_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
select_indexes.add(index)
assert len(select_indexes) <= num_to_predict
select_indexes = sorted(select_indexes)
permute_indexes = list(select_indexes)
np_rng.shuffle(permute_indexes)
orig_token = list(output_tokens)
for src_i, tgt_i in zip(select_indexes, permute_indexes):
output_tokens[src_i] = orig_token[tgt_i]
masked_lms.append(MaskedLmInstance(
index=src_i, label=orig_token[src_i]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
# Sort the spans by the index of the first span
masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans)
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
    # Labels and loss mask.
labels = [-1] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
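# Worked toy example: tokens = [cls, t1, t2, sep], tokentypes = [0, 0, 0, 0],
# masked_positions = [2], masked_labels = [t2], pad_id = 0, max_seq_length = 6
# gives tokens_np = [cls, t1, t2, sep, 0, 0], padding_mask = [1, 1, 1, 1, 0, 0],
# labels = [-1, -1, t2, -1, -1, -1] and loss_mask = [0, 0, 1, 0, 0, 0].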
def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob, short_seq_prob, seed,
tokenizer,
skip_warmup, binary_head=False,
max_seq_length_dec=None,
dataset_type='standard_bert',
zh_tokenizer=None,
span=None):
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(data_prefix[0],
data_impl, splits_string,
train_valid_test_num_samples,
max_seq_length, masked_lm_prob,
short_seq_prob, seed,
skip_warmup,
binary_head,
max_seq_length_dec,
tokenizer,
dataset_type=dataset_type,
zh_tokenizer=zh_tokenizer,
span=span)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
prefixes[i], data_impl, splits_string,
datasets_train_valid_test_num_samples[i],
max_seq_length, masked_lm_prob, short_seq_prob,
seed, skip_warmup, binary_head, max_seq_length_dec,
tokenizer, dataset_type=dataset_type, zh_tokenizer=zh_tokenizer)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights)
return (blending_train_dataset, blending_valid_dataset,
blending_test_dataset)
def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob, short_seq_prob, seed,
skip_warmup, binary_head,
max_seq_length_dec,
tokenizer,
dataset_type='standard_bert',
zh_tokenizer=None,
span=None):
if dataset_type not in DSET_TYPES:
raise ValueError("Invalid dataset_type: ", dataset_type)
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix,
data_impl,
skip_warmup)
# Get start and end indices of train/valid/train into doc-idx
    # Note that doc-idx is designed to be num-docs + 1 so we can
# easily iterate over it.
total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
print_rank_0(' > dataset split:')
def print_split_stats(name, index):
print_rank_0(' {}:'.format(name))
print_rank_0(' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1],
splits[index + 1] - splits[index]))
start_index = indexed_dataset.doc_idx[splits[index]]
end_index = indexed_dataset.doc_idx[splits[index + 1]]
print_rank_0(' sentence indices in [{}, {}) total of {} '
'sentences'.format(start_index, end_index,
end_index - start_index))
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
from fengshen.data.megatron_dataloader.bert_dataset import BertDataset
from fengshen.data.megatron_dataloader.bart_dataset import BartDataset
from fengshen.data.megatron_dataloader.cocolm_dataset import COCOLMDataset
dataset = None
if splits[index + 1] > splits[index]:
# Get the pointer to the original doc-idx so we can set it later.
doc_idx_ptr = indexed_dataset.get_doc_idx()
# Slice the doc-idx
start_index = splits[index]
# Add +1 so we can index into the dataset to get the upper bound.
end_index = splits[index + 1] + 1
# New doc_idx view.
indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
# Build the dataset accordingly.
kwargs = dict(
name=name,
data_prefix=data_prefix,
num_epochs=None,
max_num_samples=train_valid_test_num_samples[index],
max_seq_length=max_seq_length,
seed=seed,
)
if dataset_type == DSET_TYPE_BERT or dataset_type == DSET_TYPE_BERT_CN_WWM:
dataset = BertDataset(
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
binary_head=binary_head,
                # extra arguments to distinguish bert from bert-cn-wwm
tokenizer=tokenizer,
masking_style='bert' if dataset_type == DSET_TYPE_BERT else 'bert-cn-wwm',
**kwargs
)
elif dataset_type == DSET_TYPE_BART:
dataset = BartDataset(
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
tokenizer=tokenizer,
zh_tokenizer=zh_tokenizer,
**kwargs
)
elif dataset_type == DSET_TYPE_COCOLM:
dataset = COCOLMDataset(
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
tokenizer=tokenizer,
masking_style='bert',
span=span,
**kwargs
)
else:
raise NotImplementedError(
"Dataset type not fully implemented.")
# Set the original pointer so dataset remains the main dataset.
indexed_dataset.set_doc_idx(doc_idx_ptr)
# Checks.
assert indexed_dataset.doc_idx[0] == 0
assert indexed_dataset.doc_idx.shape[0] == \
(total_num_of_documents + 1)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
print_rank_0(' > building dataset index ...')
start_time = time.time()
indexed_dataset = make_indexed_dataset(data_prefix,
data_impl,
skip_warmup)
assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
print_rank_0(' > finished creating indexed dataset in {:4f} '
'seconds'.format(time.time() - start_time))
print_rank_0(' > indexed dataset stats:')
print_rank_0(' number of documents: {}'.format(
indexed_dataset.doc_idx.shape[0] - 1))
print_rank_0(' number of sentences: {}'.format(
indexed_dataset.sizes.shape[0]))
return indexed_dataset
def get_train_valid_test_split_(splits_string, size):
""" Get dataset splits from comma or '/' separated string list."""
splits = []
if splits_string.find(',') != -1:
splits = [float(s) for s in splits_string.split(',')]
elif splits_string.find('/') != -1:
splits = [float(s) for s in splits_string.split('/')]
else:
splits = [float(splits_string)]
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
splits_sum = sum(splits)
assert splits_sum > 0.0
splits = [split / splits_sum for split in splits]
splits_index = [0]
for index, split in enumerate(splits):
splits_index.append(splits_index[index] +
int(round(split * float(size))))
diff = splits_index[-1] - size
for index in range(1, len(splits_index)):
splits_index[index] -= diff
assert len(splits_index) == 4
assert splits_index[-1] == size
return splits_index
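# --- Illustrative sketch (not part of the original Megatron code) ---
# Shows how get_train_valid_test_split_ above turns a weight string into
# document boundaries; the "949,50,1" weights and the 10000 document count
# are made-up example values.
def _demo_train_valid_test_split():
    boundaries = get_train_valid_test_split_('949,50,1', 10000)
    # train covers documents [0, 9490), validation [9490, 9990), test [9990, 10000)
    assert boundaries == [0, 9490, 9990, 10000]
    return boundaries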
def get_samples_mapping(indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
short_seq_prob,
seed,
name,
binary_head):
"""Get a list that maps a sample index to a starting
sentence index, end sentence index, and length"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples "
"or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
indexmap_filename += '_{}s'.format(seed)
indexmap_filename += '.npy'
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
# ganruyi comment
# counts = torch.cuda.LongTensor([1])
# torch.distributed.all_reduce(
# counts, group=mpu.get_data_parallel_group())
# torch.distributed.all_reduce(
# counts, group=mpu.get_pipeline_model_parallel_group())
# assert counts[0].item() == (
# torch.distributed.get_world_size() //
# torch.distributed.get_world_size(
# group=mpu.get_tensor_model_parallel_group()))
# Load indexed dataset.
print_rank_0(' > loading indexed mapping from {}'.format(
indexmap_filename))
start_time = time.time()
samples_mapping = np.load(
indexmap_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
samples_mapping.shape[0]))
return samples_mapping
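# --- Illustrative sketch (not part of the original file) ---
# Mirrors the cache-file naming logic of get_samples_mapping above so the
# expected .npy filename is easy to see; the prefix and hyper-parameters
# below are hypothetical values, not defaults of this repo.
def _demo_samples_mapping_filename():
    data_prefix = '/path/to/corpus_text_sentence'
    name, max_num_samples, max_seq_length = 'train', 1000000, 512
    short_seq_prob, seed = 0.1, 1234
    filename = (data_prefix
                + '_{}_indexmap'.format(name)
                + '_{}mns'.format(max_num_samples)
                + '_{}msl'.format(max_seq_length)
                + '_{:0.2f}ssp'.format(short_seq_prob)
                + '_{}s'.format(seed)
                + '.npy')
    # -> '/path/to/corpus_text_sentence_train_indexmap_1000000mns_512msl_0.10ssp_1234s.npy'
    return filename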
| 30,965 | 38.247148 | 103 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/utils.py
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def print_rank_0(message):
"""If distributed is initialized, print only on rank 0."""
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(message, flush=True)
else:
print(message, flush=True)
| 903 | 35.16 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/bert_dataset.py
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Style dataset."""
import numpy as np
import torch
from fengshen.data.megatron_dataloader.dataset_utils import (
get_samples_mapping,
get_a_and_b_segments,
create_masked_lm_predictions,
create_tokens_and_tokentypes,
)
class BertDataset(torch.utils.data.Dataset):
def __init__(self, name, indexed_dataset, data_prefix,
num_epochs, max_num_samples, masked_lm_prob,
max_seq_length, short_seq_prob, seed, binary_head, tokenizer, masking_style):
# Params to store.
self.name = name
self.seed = seed
self.masked_lm_prob = masked_lm_prob
self.max_seq_length = max_seq_length
self.short_seq_prob = short_seq_prob
self.binary_head = binary_head
self.masking_style = masking_style
# Dataset.
self.indexed_dataset = indexed_dataset
# Build the samples mapping.
self.samples_mapping = get_samples_mapping(self.indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
# account for added tokens
self.max_seq_length - 3,
short_seq_prob,
self.seed,
self.name,
self.binary_head)
inv_vocab = {v: k for k, v in tokenizer.vocab.items()}
self.vocab_id_list = list(inv_vocab.keys())
self.vocab_id_to_token_dict = inv_vocab
self.cls_id = tokenizer.cls_token_id
self.sep_id = tokenizer.sep_token_id
self.mask_id = tokenizer.mask_token_id
self.pad_id = tokenizer.pad_token_id
self.tokenizer = tokenizer
def __len__(self):
return self.samples_mapping.shape[0]
def __getitem__(self, idx):
start_idx, end_idx, seq_length = self.samples_mapping[idx]
sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
        # We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32))
return build_training_sample(sample, seq_length,
self.max_seq_length, # needed for padding
self.vocab_id_list,
self.vocab_id_to_token_dict,
self.cls_id, self.sep_id,
self.mask_id, self.pad_id,
self.masked_lm_prob, np_rng,
self.binary_head,
tokenizer=self.tokenizer,
masking_style=self.masking_style)
def build_training_sample(sample,
target_seq_length, max_seq_length,
vocab_id_list, vocab_id_to_token_dict,
cls_id, sep_id, mask_id, pad_id,
masked_lm_prob, np_rng, binary_head,
tokenizer,
masking_style='bert'):
"""Biuld training sample.
Arguments:
sample: A list of sentences in which each sentence is a list token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
np_rng: Random number genenrator. Note that this rng state should be
numpy and not python since python randint is inclusive for
the opper bound whereas the numpy one is exclusive.
"""
if binary_head:
# We assume that we have at least two sentences in the sample
assert len(sample) > 1
assert target_seq_length <= max_seq_length
# Divide sample into two segments (A and B).
if binary_head:
tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample,
np_rng)
else:
tokens_a = []
for j in range(len(sample)):
tokens_a.extend(sample[j])
tokens_b = []
is_next_random = False
if len(tokens_a) >= max_seq_length-3:
tokens_a = tokens_a[:max_seq_length-3]
# Truncate to `target_sequence_length`.
max_num_tokens = target_seq_length
    # truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a),
    #                               len(tokens_b), max_num_tokens, np_rng)
# Build tokens and toketypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b,
cls_id, sep_id)
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob,
cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng,
tokenizer=tokenizer,
masking_style=masking_style)
# Padding.
tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \
= pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length)
train_sample = {
'input_ids': tokens_np,
'token_type_ids': tokentypes_np,
'labels': labels_np,
'next_sentence_label': int(is_next_random),
'attention_mask': padding_mask_np}
return train_sample
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length,
dtype=np.int64)
# Lables and loss mask.
labels = [-100] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
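# --- Illustrative sketch (not part of the original file) ---
# A toy call to pad_and_convert_to_numpy; the token ids, pad id and lengths
# are made up and only meant to show the shape of the returned arrays.
def _demo_pad_and_convert_to_numpy():
    tokens = [101, 7, 8, 9, 102]            # hypothetical [CLS] a b c [SEP]
    tokentypes = [0, 0, 0, 0, 0]
    masked_positions = [2]                  # index of the masked token
    masked_labels = [8]                     # original id at that index
    tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np = \
        pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
                                 masked_labels, pad_id=0, max_seq_length=8)
    # tokens_np       -> [101, 7, 8, 9, 102, 0, 0, 0]
    # padding_mask_np -> [1, 1, 1, 1, 1, 0, 0, 0]
    # labels_np       -> -100 everywhere except index 2, which holds 8
    # loss_mask_np    -> 1 only at index 2
    return tokens_np, labels_np, loss_mask_np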
| 8,121 | 40.228426 | 94 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/blendable_dataset.py
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Blendable dataset."""
import time
import numpy as np
import torch
from fengshen.data.megatron_dataloader.utils import print_rank_0
class BlendableDataset(torch.utils.data.Dataset):
def __init__(self, datasets, weights):
self.datasets = datasets
num_datasets = len(datasets)
assert num_datasets == len(weights)
self.size = 0
for dataset in self.datasets:
self.size += len(dataset)
# Normalize weights.
weights = np.array(weights, dtype=np.float64)
sum_weights = np.sum(weights)
assert sum_weights > 0.0
weights /= sum_weights
        # Build indices.
start_time = time.time()
assert num_datasets < 255
self.dataset_index = np.zeros(self.size, dtype=np.uint8)
self.dataset_sample_index = np.zeros(self.size, dtype=np.int64)
from fengshen.data.megatron_dataloader import helpers
helpers.build_blending_indices(self.dataset_index,
self.dataset_sample_index,
weights, num_datasets, self.size,
torch.distributed.get_rank() == 0)
print_rank_0('> elapsed time for building blendable dataset indices: '
'{:.2f} (sec)'.format(time.time() - start_time))
def __len__(self):
return self.size
def __getitem__(self, idx):
dataset_idx = self.dataset_index[idx]
sample_idx = self.dataset_sample_index[idx]
return self.datasets[dataset_idx][sample_idx]
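# --- Illustrative sketch (not part of the original file) ---
# Shows how BlendableDataset is typically constructed from two already-built
# map-style datasets. It is only a sketch: building the blending indices
# requires the compiled fengshen helpers extension and an initialized
# torch.distributed process group, and dataset_a/dataset_b are hypothetical.
def _demo_blendable_dataset(dataset_a, dataset_b):
    # Draw roughly 70% of samples from dataset_a and 30% from dataset_b;
    # the weights are normalized inside BlendableDataset.
    blended = BlendableDataset([dataset_a, dataset_b], [0.7, 0.3])
    return blended[0]  # first sample, routed to one of the underlying datasets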
| 2,208 | 32.984615 | 78 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/__init__.py
|
from . import indexed_dataset
| 30 | 14.5 | 29 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/megatron_dataloader/indexed_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate
import numpy as np
import torch
from fengshen.data.megatron_dataloader.utils import print_rank_0
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
else:
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and "
".bin can be appended to get full filenames.")
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file,
dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx "
"and .bin can be appended to get full filenames.")
return None
if impl == 'infer':
impl = infer_dataset_impl(path)
if impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
print(f"Unknown dataset implementation: {impl}")
return None
def dataset_exists(path, impl):
if impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
    6: np.float32,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.doc_count = struct.unpack('<Q', f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[
self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError(
"Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path))
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx: ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[
self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx: ptx + a.size])
return a
elif isinstance(idx, slice):
            # Hack just to make this work, can optimize later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
        np.float32: 4,
np.double: 8
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, tensor):
        num_bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(
            self.data_offsets[-1] + num_bytes // self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(
self.data_offsets) - 1, len(self.sizes)))
index.write(struct.pack('<Q', len(self.doc_idx)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes, doc_idx):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
self._file.write(struct.pack('<Q', len(doc_idx)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order='C'))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
self._doc_count = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
print_rank_0(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print_rank_0(" reading sizes...")
self._sizes = np.frombuffer(
self._bin_buffer,
dtype=np.int32,
count=self._len,
offset=offset)
print_rank_0(" reading pointers...")
self._pointers = np.frombuffer(self._bin_buffer,
dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
print_rank_0(" reading document index...")
self._doc_idx = np.frombuffer(
self._bin_buffer,
dtype=np.int64, count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
        self._do_init(state, skip_warmup=True)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print_rank_0(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print_rank_0(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(
data_file_path(self._path), mode='r', order='C')
print_rank_0(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
count=size, offset=ptr)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError(
"Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
count=total_size, offset=ptr)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype,
count=length, offset=ptr)
return np_array
@property
def sizes(self):
return self._index.sizes
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(
data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb', buffering=5000000)
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx)
| 18,859 | 31.1843 | 80 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/data_utils/sop_utils.py
|
# copy from megatron
def get_a_and_b_segments(sample, np_rng):
"""Divide sample into a and b segments."""
# Number of sentences in the sample.
n_sentences = len(sample)
# Make sure we always have two sentences.
assert n_sentences > 1, 'make sure each sample has at least two sentences.'
# First part:
# `a_end` is how many sentences go into the `A`.
a_end = 1
if n_sentences >= 3:
        # Note that randint in numpy is exclusive.
a_end = np_rng.randint(1, n_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
# Second part:
tokens_b = []
for j in range(a_end, n_sentences):
tokens_b.extend(sample[j])
# Random next:
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
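# --- Illustrative sketch (not part of the original file) ---
# A toy call showing how a sample of three "sentences" (lists of token ids)
# is split into the A/B segments used by the sentence-order objective.
def _demo_get_a_and_b_segments():
    import numpy as np
    np_rng = np.random.RandomState(seed=1234)
    sample = [[1, 2], [3, 4], [5, 6]]       # three toy sentences
    tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np_rng)
    # tokens_a + tokens_b always covers all three sentences; when
    # is_next_random is True the two halves have been swapped.
    assert sorted(tokens_a + tokens_b) == [1, 2, 3, 4, 5, 6]
    return tokens_a, tokens_b, is_next_random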
| 912 | 26.666667 | 79 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/data_utils/common_utils.py
|
def padding_to_maxlength(ids, max_length, pad_id):
cur_len = len(ids)
len_diff = max_length - len(ids)
return ids + [pad_id] * len_diff, [1] * cur_len + [0] * len_diff
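# --- Illustrative sketch (not part of the original file) ---
# padding_to_maxlength returns the padded id list and its attention mask;
# the ids below are made-up example values.
def _demo_padding_to_maxlength():
    ids, mask = padding_to_maxlength([101, 102, 103], max_length=6, pad_id=0)
    # ids  -> [101, 102, 103, 0, 0, 0]
    # mask -> [1, 1, 1, 0, 0, 0]
    return ids, mask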
| 180 | 35.2 | 68 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/data_utils/truncate_utils.py
|
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
"""Truncates a pair of sequences to a maximum sequence length."""
# print(len_a, len_b, max_num_tokens)
assert len_a > 0
if len_a + len_b <= max_num_tokens:
return False
while len_a + len_b > max_num_tokens:
if len_a > len_b:
len_a -= 1
tokens = tokens_a
else:
len_b -= 1
tokens = tokens_b
if np_rng.random() < 0.5:
del tokens[0]
else:
tokens.pop()
return True
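# --- Illustrative sketch (not part of the original file) ---
# truncate_segments trims the two token lists in place until their combined
# length fits max_num_tokens and reports whether anything was removed; the
# token ids below are made up.
def _demo_truncate_segments():
    import numpy as np
    np_rng = np.random.RandomState(seed=0)
    tokens_a = [1, 2, 3, 4, 5]
    tokens_b = [6, 7, 8]
    truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a),
                                  len(tokens_b), 6, np_rng)
    assert truncated is True
    assert len(tokens_a) + len(tokens_b) == 6
    return tokens_a, tokens_b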
| 579 | 28 | 80 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/data_utils/token_type_utils.py
|
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
"""Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
tokens = []
tokentypes = []
# [CLS].
tokens.append(cls_id)
tokentypes.append(0)
# Segment A.
for token in tokens_a:
tokens.append(token)
tokentypes.append(0)
# [SEP].
tokens.append(sep_id)
tokentypes.append(0)
# Segment B.
for token in tokens_b:
tokens.append(token)
tokentypes.append(1)
if tokens_b:
# [SEP].
tokens.append(sep_id)
tokentypes.append(1)
return tokens, tokentypes
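# --- Illustrative sketch (not part of the original file) ---
# Toy ids showing the merged token sequence and its token-type ids;
# 101/102 stand in for the [CLS]/[SEP] ids of a real vocabulary.
def _demo_create_tokens_and_tokentypes():
    tokens, tokentypes = create_tokens_and_tokentypes([7, 8], [9],
                                                      cls_id=101, sep_id=102)
    # tokens     -> [101, 7, 8, 102, 9, 102]
    # tokentypes -> [0, 0, 0, 0, 1, 1]
    return tokens, tokentypes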
| 639 | 23.615385 | 75 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/data_utils/sentence_split.py
|
import re
class ChineseSentenceSplitter(object):
def merge_symmetry(self, sentences, symmetry=('“', '”')):
        # Merge symmetric symbols, e.g. sentences wrapped in paired double quotes.
effective_ = []
merged = True
for index in range(len(sentences)):
if symmetry[0] in sentences[index] and symmetry[1] not in sentences[index]:
merged = False
effective_.append(sentences[index])
elif symmetry[1] in sentences[index] and not merged:
merged = True
effective_[-1] += sentences[index]
elif symmetry[0] not in sentences[index] and symmetry[1] not in sentences[index] and not merged:
effective_[-1] += sentences[index]
else:
effective_.append(sentences[index])
return [i.strip() for i in effective_ if len(i.strip()) > 0]
def to_sentences(self, paragraph):
# """由段落切分成句子"""
sentences = re.split(r"(?|。|[!]+|!|\…\…)", paragraph)
sentences.append("")
sentences = ["".join(i) for i in zip(sentences[0::2], sentences[1::2])]
sentences = [i.strip() for i in sentences if len(i.strip()) > 0]
for j in range(1, len(sentences)):
if sentences[j][0] == '”':
sentences[j-1] = sentences[j-1] + '”'
sentences[j] = sentences[j][1:]
return self.merge_symmetry(sentences)
def tokenize(self, text):
return self.to_sentences(text)
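# --- Illustrative sketch (not part of the original file) ---
# Splitting a short made-up paragraph, including a quoted exclamation that
# must stay attached to its closing quotation mark.
def _demo_chinese_sentence_splitter():
    splitter = ChineseSentenceSplitter()
    sentences = splitter.tokenize('今天天气很好。他说:“我们出去走走吧!”明天再看。')
    # -> ['今天天气很好。', '他说:“我们出去走走吧!”', '明天再看。']
    return sentences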
| 1,457 | 39.5 | 108 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/data_utils/mask_utils.py
|
import collections
import numpy as np
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
def create_masked_lm_predictions(tokens,
vocab_id_list, vocab_id_to_token_dict,
masked_lm_prob,
cls_id, sep_id, mask_id,
max_predictions_per_seq,
np_rng,
max_ngrams=3,
do_whole_word_mask=True,
favor_longer_ngram=False,
do_permutation=False,
geometric_dist=False,
masking_style="bert",
zh_tokenizer=None):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
'''
modified from Megatron-LM
Args:
tokens: 输入
vocab_id_list: 词表token_id_list
vocab_id_to_token_dict: token_id到token字典
masked_lm_prob:mask概率
cls_id、sep_id、mask_id:特殊token
max_predictions_per_seq:最大mask个数
np_rng:mask随机数
max_ngrams:最大词长度
do_whole_word_mask:是否做全词掩码
favor_longer_ngram:优先用长的词
do_permutation:是否打乱
geometric_dist:用np_rng.geometric做随机
masking_style:mask类型
zh_tokenizer:WWM的分词器,比如用jieba.lcut做分词之类的
'''
cand_indexes = []
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
    # If no Chinese word segmenter is given, fall back to the plain ## word-piece rule
if zh_tokenizer is None:
for (i, token) in enumerate(tokens):
if token == cls_id or token == sep_id:
token_boundary[i] = 1
continue
            # Whole Word Masking means that we mask all of the wordpieces
            # corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (do_whole_word_mask and len(cand_indexes) >= 1 and
not is_start_piece(vocab_id_to_token_dict[token])):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(vocab_id_to_token_dict[token]):
token_boundary[i] = 1
else:
# 如果指定了中文分词器,那就先用分词器分词,然后再进行判断
# 获取去掉CLS SEP的原始文本
raw_tokens = []
for t in tokens:
if t != cls_id and t != sep_id:
raw_tokens.append(t)
raw_tokens = [vocab_id_to_token_dict[i] for i in raw_tokens]
        # Segment the text, then record for each starting character the length of the longest word beginning with it
word_list = set(zh_tokenizer(''.join(raw_tokens), HMM=True))
word_length_dict = {}
for w in word_list:
if len(w) < 1:
continue
if w[0] not in word_length_dict:
word_length_dict[w[0]] = len(w)
elif word_length_dict[w[0]] < len(w):
word_length_dict[w[0]] = len(w)
i = 0
        # Scan the tokens against the segmented word list
while i < len(tokens):
token_id = tokens[i]
token = vocab_id_to_token_dict[token_id]
if len(token) == 0 or token_id == cls_id or token_id == sep_id:
token_boundary[i] = 1
i += 1
continue
word_max_length = 1
if token[0] in word_length_dict:
word_max_length = word_length_dict[token[0]]
j = 0
word = ''
word_end = i+1
            # Backward compatible with the old ## style: if the following pieces start with ##, merge them into the current word
old_style = False
while word_end < len(tokens) and vocab_id_to_token_dict[tokens[word_end]].startswith('##'):
old_style = True
word_end += 1
if not old_style:
while j < word_max_length and i+j < len(tokens):
cur_token = tokens[i+j]
word += vocab_id_to_token_dict[cur_token]
j += 1
if word in word_list:
word_end = i+j
cand_indexes.append([p for p in range(i, word_end)])
token_boundary[i] = 1
i = word_end
output_tokens = list(tokens)
masked_lm_positions = []
masked_lm_labels = []
if masked_lm_prob == 0:
return (output_tokens, masked_lm_positions,
masked_lm_labels, token_boundary)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
# Note(mingdachen):
        # By default, we set the probabilities to favor shorter ngram sequences.
pvals = 1. / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
    # Build the n-gram index: for each word position, record the n-grams that start there
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
(masked_lms, masked_spans) = ([], [])
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
token_id = tokens[index]
if masking_style == "bert":
# 80% of the time, replace with [MASK]
if np_rng.random() < 0.8:
masked_token = mask_id
else:
# 10% of the time, keep original
if np_rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
elif masking_style == "t5":
masked_token = mask_id
else:
raise ValueError("invalid value of masking style")
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=token_id))
masked_spans.append(MaskedLmInstance(
index=index_set,
label=[tokens[index] for index in index_set]))
assert len(masked_lms) <= num_to_predict
np_rng.shuffle(ngram_indexes)
select_indexes = set()
if do_permutation:
for cand_index_set in ngram_indexes:
if len(select_indexes) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes or index in select_indexes:
continue
n = np.random.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(select_indexes) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(select_indexes) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes or index in select_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
select_indexes.add(index)
assert len(select_indexes) <= num_to_predict
select_indexes = sorted(select_indexes)
permute_indexes = list(select_indexes)
np_rng.shuffle(permute_indexes)
orig_token = list(output_tokens)
for src_i, tgt_i in zip(select_indexes, permute_indexes):
output_tokens[src_i] = orig_token[tgt_i]
masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
# Sort the spans by the index of the first span
masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans)
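# --- Illustrative sketch (not part of the original file) ---
# A toy call to create_masked_lm_predictions with a tiny hand-made vocabulary;
# all ids and strings below are made up. With masking_style='bert' the selected
# positions are replaced by [MASK], kept as-is, or swapped for a random id.
def _demo_create_masked_lm_predictions():
    vocab_id_to_token_dict = {0: '[CLS]', 1: '[SEP]', 2: '[MASK]',
                              3: 'he', 4: 'plays', 5: 'foot', 6: '##ball'}
    vocab_id_list = list(vocab_id_to_token_dict.keys())
    tokens = [0, 3, 4, 5, 6, 1]             # [CLS] he plays foot ##ball [SEP]
    np_rng = np.random.RandomState(seed=1234)
    (output_tokens, masked_positions, masked_labels,
     token_boundary, masked_spans) = create_masked_lm_predictions(
        tokens, vocab_id_list, vocab_id_to_token_dict,
        masked_lm_prob=0.15, cls_id=0, sep_id=1, mask_id=2,
        max_predictions_per_seq=2, np_rng=np_rng)
    # output_tokens has the same length as tokens; masked_positions and
    # masked_labels give the indices and original ids the MLM loss is taken on.
    assert len(output_tokens) == len(tokens)
    return output_tokens, masked_positions, masked_labels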
| 11,400 | 38.863636 | 103 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/sequence_tagging_dataloader/sequence_tagging_collator.py
|
from dataclasses import dataclass
from torch.utils.data._utils.collate import default_collate
import copy
import torch
import numpy as np
@dataclass
class CollatorForLinear:
args = None
tokenizer = None
label2id = None
def __call__(self, samples):
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = 0
special_tokens_count = 2
segment_id = 0
features=[]
for (ex_index, example) in enumerate(samples):
tokens = copy.deepcopy(example['text_a'])
label_ids = [self.label2id[x] for x in example['labels']]
if len(tokens) > self.args.max_seq_length - special_tokens_count:
tokens = tokens[: (self.args.max_seq_length - special_tokens_count)]
label_ids = label_ids[: (self.args.max_seq_length - special_tokens_count)]
tokens += [sep_token]
label_ids += [self.label2id["O"]]
segment_ids = [segment_id] * len(tokens)
tokens = [cls_token] + tokens
label_ids = [self.label2id["O"]] + label_ids
segment_ids = [segment_id] + segment_ids
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
input_len = len(label_ids)
padding_length = self.args.max_seq_length - len(input_ids)
input_ids += [pad_token] * padding_length
input_mask += [0] * padding_length
segment_ids += [segment_id] * padding_length
label_ids += [pad_token] * padding_length
assert len(input_ids) == self.args.max_seq_length
assert len(input_mask) == self.args.max_seq_length
assert len(segment_ids) == self.args.max_seq_length
assert len(label_ids) == self.args.max_seq_length
features.append({
'input_ids':torch.tensor(input_ids),
'attention_mask':torch.tensor(input_mask),
'input_len':torch.tensor(input_len),
'token_type_ids':torch.tensor(segment_ids),
'labels':torch.tensor(label_ids),
})
return default_collate(features)
@dataclass
class CollatorForCrf:
args = None
tokenizer = None
label2id = None
def __call__(self, samples):
features = []
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = 0
special_tokens_count = 2
segment_id = 0
for (ex_index, example) in enumerate(samples):
tokens = copy.deepcopy(example['text_a'])
label_ids = [self.label2id[x] for x in example['labels']]
if len(tokens) > self.args.max_seq_length - special_tokens_count:
tokens = tokens[: (self.args.max_seq_length - special_tokens_count)]
label_ids = label_ids[: (self.args.max_seq_length - special_tokens_count)]
tokens += [sep_token]
label_ids += [self.label2id["O"]]
segment_ids = [segment_id] * len(tokens)
tokens = [cls_token] + tokens
label_ids = [self.label2id["O"]] + label_ids
segment_ids = [segment_id] + segment_ids
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
input_len = len(label_ids)
padding_length = self.args.max_seq_length - len(input_ids)
input_ids += [pad_token] * padding_length
input_mask += [0] * padding_length
segment_ids += [segment_id] * padding_length
label_ids += [pad_token] * padding_length
assert len(input_ids) == self.args.max_seq_length
assert len(input_mask) == self.args.max_seq_length
assert len(segment_ids) == self.args.max_seq_length
assert len(label_ids) == self.args.max_seq_length
features.append({
'input_ids':torch.tensor(input_ids),
'attention_mask':torch.tensor(input_mask),
'input_len':torch.tensor(input_len),
'token_type_ids':torch.tensor(segment_ids),
'labels':torch.tensor(label_ids),
})
return default_collate(features)
@dataclass
class CollatorForSpan:
args = None
tokenizer = None
label2id = None
def __call__(self, samples):
features = []
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = 0
special_tokens_count = 2
max_entities_count = 100
segment_id = 0
for (ex_index, example) in enumerate(samples):
subjects = copy.deepcopy(example['subject'])
tokens = copy.deepcopy(example['text_a'])
start_ids = [0] * len(tokens)
end_ids = [0] * len(tokens)
subject_ids = []
for subject in subjects:
label = subject[0]
start = subject[1]
end = subject[2]
start_ids[start] = self.label2id[label]
end_ids[end] = self.label2id[label]
subject_ids.append([self.label2id[label], start, end])
subject_ids+=[[-1,-1,-1]]*(max_entities_count-len(subject_ids))
if len(tokens) > self.args.max_seq_length - special_tokens_count:
tokens = tokens[: (self.args.max_seq_length - special_tokens_count)]
start_ids = start_ids[: (self.args.max_seq_length - special_tokens_count)]
end_ids = end_ids[: (self.args.max_seq_length - special_tokens_count)]
tokens += [sep_token]
start_ids += [0]
end_ids += [0]
segment_ids = [segment_id] * len(tokens)
tokens = [cls_token] + tokens
start_ids = [0] + start_ids
end_ids = [0] + end_ids
segment_ids = [segment_id] + segment_ids
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
input_len = len(input_ids)
padding_length = self.args.max_seq_length - len(input_ids)
input_ids += [pad_token] * padding_length
input_mask += [0] * padding_length
segment_ids += [segment_id] * padding_length
start_ids += [0] * padding_length
end_ids += [0] * padding_length
assert len(input_ids) == self.args.max_seq_length
assert len(input_mask) == self.args.max_seq_length
assert len(segment_ids) == self.args.max_seq_length
assert len(start_ids) == self.args.max_seq_length
assert len(end_ids) == self.args.max_seq_length
features.append({
'input_ids': torch.tensor(np.array(input_ids)),
'attention_mask': torch.tensor(np.array(input_mask)),
'token_type_ids': torch.tensor(np.array(segment_ids)),
'start_positions': torch.tensor(np.array(start_ids)),
'end_positions': torch.tensor(np.array(end_ids)),
"subjects": torch.tensor(np.array(subject_ids)),
'input_len': torch.tensor(np.array(input_len)),
})
return default_collate(features)
@dataclass
class CollatorForBiaffine:
args = None
tokenizer = None
label2id = None
def __call__(self, samples):
features = []
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = 0
special_tokens_count = 2
segment_id = 0
for (ex_index, example) in enumerate(samples):
subjects = copy.deepcopy(example['subject'])
tokens = copy.deepcopy(example['text_a'])
span_labels = np.zeros((self.args.max_seq_length,self.args.max_seq_length))
span_labels[:] = self.label2id["O"]
for subject in subjects:
label = subject[0]
start = subject[1]
end = subject[2]
if start < self.args.max_seq_length - special_tokens_count and end < self.args.max_seq_length - special_tokens_count:
span_labels[start + 1, end + 1] = self.label2id[label]
if len(tokens) > self.args.max_seq_length - special_tokens_count:
tokens = tokens[: (self.args.max_seq_length - special_tokens_count)]
tokens += [sep_token]
span_labels[len(tokens), :] = self.label2id["O"]
span_labels[:, len(tokens)] = self.label2id["O"]
segment_ids = [segment_id] * len(tokens)
tokens = [cls_token] + tokens
span_labels[0, :] = self.label2id["O"]
span_labels[:, 0] = self.label2id["O"]
segment_ids = [segment_id] + segment_ids
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
span_mask = np.ones(span_labels.shape)
input_len = len(input_ids)
padding_length = self.args.max_seq_length - len(input_ids)
input_ids += [pad_token] * padding_length
input_mask += [0] * padding_length
segment_ids += [segment_id] * padding_length
span_labels[input_len:, :] = 0
span_labels[:, input_len:] = 0
span_mask[input_len:, :] = 0
span_mask[:, input_len:] = 0
span_mask=np.triu(span_mask,0)
span_mask=np.tril(span_mask,10)
assert len(input_ids) == self.args.max_seq_length
assert len(input_mask) == self.args.max_seq_length
assert len(segment_ids) == self.args.max_seq_length
assert len(span_labels) == self.args.max_seq_length
assert len(span_labels[0]) == self.args.max_seq_length
features.append({
'input_ids': torch.tensor(np.array(input_ids)),
'attention_mask': torch.tensor(np.array(input_mask)),
'token_type_ids': torch.tensor(np.array(segment_ids)),
'span_labels': torch.tensor(np.array(span_labels)),
'span_mask': torch.tensor(np.array(span_mask)),
'input_len': torch.tensor(np.array(input_len)),
})
return default_collate(features)
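# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of CollatorForLinear with a stub tokenizer and a toy label map;
# a real run would use a HuggingFace BertTokenizer and the training argparse
# namespace instead of the stand-ins below.
def _demo_collator_for_linear():
    from argparse import Namespace
    class _StubTokenizer:
        vocab = {}
        def convert_tokens_to_ids(self, tokens):
            return [self.vocab.setdefault(t, len(self.vocab) + 1) for t in tokens]
    collator = CollatorForLinear()
    collator.args = Namespace(max_seq_length=16)
    collator.tokenizer = _StubTokenizer()
    collator.label2id = {'O': 0, 'B-PER': 1, 'I-PER': 2}
    samples = [{'text_a': ['李', '明', '在', '北', '京'],
                'labels': ['B-PER', 'I-PER', 'O', 'O', 'O']}]
    batch = collator(samples)
    # batch['input_ids'].shape -> torch.Size([1, 16])
    return batch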
| 10,403 | 36.970803 | 133 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/sequence_tagging_dataloader/sequence_tagging_datasets.py
|
from torch.utils.data import Dataset
from fengshen.metric.utils_ner import get_entities
import os
def get_datasets(args):
processor = DataProcessor(args.data_dir, args.decode_type)
train_data = TaskDataset(processor=processor, mode="train")
valid_data = TaskDataset(processor=processor, mode="dev")
test_data = TaskDataset(processor=processor, mode="dev")
return {"train":train_data,"validation":valid_data,"test":test_data}
# def get_labels(decode_type):
# with open("/cognitive_comp/lujunyu/data_zh/NER_Aligned/weibo/labels.txt") as f:
# label_list = ["[PAD]", "[START]", "[END]"]
# if decode_type=="crf" or decode_type=="linear":
# for line in f.readlines():
# label_list.append(line.strip())
# elif decode_type=="biaffine" or decode_type=="span":
# for line in f.readlines():
# tag = line.strip().split("-")
# if len(tag) == 1 and tag[0] not in label_list:
# label_list.append(tag[0])
# elif tag[1] not in label_list:
# label_list.append(tag[1])
# label2id={label:id for id,label in enumerate(label_list)}
# id2label={id:label for id,label in enumerate(label_list)}
# return label2id, id2label
class DataProcessor(object):
def __init__(self, data_dir, decode_type) -> None:
super().__init__()
self.data_dir = data_dir
self.decode_type = decode_type
def get_examples(self, mode):
return self._create_examples(self._read_text(os.path.join(self.data_dir, mode + ".all.bmes")), mode)
@staticmethod
def get_labels(args):
with open(os.path.join(args.data_dir, "labels.txt")) as f:
label_list = ["[PAD]", "[START]", "[END]"]
if args.decode_type=="crf" or args.decode_type=="linear":
for line in f.readlines():
label_list.append(line.strip())
elif args.decode_type=="biaffine" or args.decode_type=="span":
for line in f.readlines():
tag = line.strip().split("-")
if len(tag) == 1 and tag[0] not in label_list:
label_list.append(tag[0])
elif tag[1] not in label_list:
label_list.append(tag[1])
label2id = {label: i for i, label in enumerate(label_list)}
id2label={id:label for id,label in enumerate(label_list)}
return label2id,id2label
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['words']
labels = []
for x in line['labels']:
if 'M-' in x:
labels.append(x.replace('M-', 'I-'))
else:
labels.append(x)
subject = get_entities(labels, id2label=None, markup='bioes')
examples.append({'guid':guid, 'text_a':text_a, 'labels':labels, 'subject':subject})
return examples
@classmethod
    def _read_text(cls, input_file):
lines = []
with open(input_file, 'r') as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
lines.append({"words": words, "labels": labels})
words = []
labels = []
else:
splits = line.split()
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
lines.append({"words": words, "labels": labels})
return lines
class TaskDataset(Dataset):
def __init__(self, processor, mode='train'):
super().__init__()
self.data = self.load_data(processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, processor, mode):
examples = processor.get_examples(mode)
return examples
| 4,409 | 37.017241 | 108 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/bert_dataloader/preprocessing.py
|
import re
import json
import multiprocessing
from tqdm import tqdm
from pathlib import Path
from itertools import chain
_SPLIT_DATA_PATH = '/data1/datas/wudao_180g'
def cut_sent(path):
"""
    Split Chinese text into sentences. By default it splits on ?, 。, ! and
    ellipses, while keeping sentences wrapped in double quotes together.
    Implemented by inserting a split marker and then splitting on it.
"""
path = Path(path)
# print(path)
save_path = str(Path('/data1/datas/wudao_180g_split', path.name))
    print('processing file:', save_path)
with open(save_path, 'wt', encoding='utf-8') as w:
with open(path, 'rt', encoding='utf-8') as f:
for para in tqdm(f):
para = json.loads(para)
para_ = para['text'] + ' '
# print('sentence piece......')
                # for pep8/flake8, \? cannot be written directly in a non-raw string; it is written as \\?
para_ = re.sub('([?。!\\?\\!…]+)([^”’]|[”’])',
r'\1#####\2', para_)
para_ = re.sub('([\\.]{3,})([^”’])', r'\1#####\2', para_)
                # \1: sentence-ending punctuation immediately followed by ’ or ”; \2: a non-ending character, i.e. a sentence wrapped in quotes
para_ = re.sub(
'([。!?\\?\\!…][”’])([^,。!?\\?\\!]|\\s)', r'\1#####\2', para_)
para_ = re.sub(
'([\\.]{3,}[”’])([^,。!?\\?\\!]|\\s)', r'\1#####\2', para_)
para_ = re.sub(
'([#]{5})([”’])([^,。!?\\?\\!])', r'\2#####\3', para_)
para_ = para_.strip()
                # pack several sentences into one sample of at most 512 characters
line_ = ''
for line in para_.split('#####'):
line = line.strip()
if len(line_) < 512 and len(line) > 0:
line_ += line
else:
w.writelines(json.dumps(
{'text': line_}, ensure_ascii=False)+'\n')
line_ = line
w.writelines(json.dumps(
{'text': line_}, ensure_ascii=False)+'\n')
def chain_iter(*filenames):
"""
    Chain multiple files into a single iterator.
"""
reader = [open(file, 'r') for file in filenames]
return chain(*reader)
class Config(object):
def __init__(self, data_path=_SPLIT_DATA_PATH, num_worker=16, split_numb=600000, cut_sentence=True, output_file=None) -> None:
self.data_path = Path(data_path)
self.num_worker = num_worker
self.split_numb = split_numb
self.cut_sentence = cut_sentence
def processing1():
args = Config()
p_ = [str(i) for i in args.data_path.glob('*')]
fin = chain_iter(*p_)
pool = multiprocessing.Pool(args.num_worker)
docs = pool.imap(cut_sent, fin, chunksize=args.num_worker)
if not Path(args.data_path.parent, args.data_path.name+'_split').exists():
Path(args.data_path.parent, args.data_path.name+'_split').mkdir()
writer = open(str(Path(args.data_path.parent, args.data_path.name +
'_split', 'sentence_level.json')), 'wt', encoding='utf-8')
for doc in tqdm(docs):
for sentence in doc:
writer.writelines(json.dumps(
{"text": sentence}, ensure_ascii=False)+'\n')
pool.close()
pool.join()
writer.close()
if __name__ == '__main__':
from time import process_time, perf_counter
from random import shuffle
st = process_time()
args = Config(num_worker=16)
if not Path(args.data_path.parent, args.data_path.name+'_split').exists():
Path(args.data_path.parent, args.data_path.name +
'_split').mkdir(parents=True)
p_ = [str(i) for i in args.data_path.glob('*')]
    # simple shuffle of the file list
shuffle(p_)
pool = multiprocessing.Pool(args.num_worker)
for item in p_:
pool.apply_async(func=cut_sent, args=(item,))
pool.close()
pool.join()
cost_time = process_time() - st
print('DONE!! cost time : %.5f' % cost_time)
| 3,724 | 32.558559 | 130 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/bert_dataloader/load.py
|
import os
import re
from pathlib import Path
import glob
from tqdm import tqdm
from contextlib import ExitStack
import datasets
import multiprocessing
from typing import cast, TextIO
from itertools import chain
import json
from concurrent.futures import ProcessPoolExecutor
from random import shuffle
from pytorch_lightning import LightningDataModule
from typing import Optional
from torch.utils.data import DataLoader
# _SPLIT_DATA_PATH = '/data1/datas/wudao_180g_split/test'
_SPLIT_DATA_PATH = '/data1/datas/wudao_180g_split'
_CACHE_SPLIT_DATA_PATH = '/data1/datas/wudao_180g_FSData'
# feats = datasets.Features({"text": datasets.Value('string')})
class BertDataGenerate(object):
def __init__(self,
data_files=_SPLIT_DATA_PATH,
save_path=_CACHE_SPLIT_DATA_PATH,
train_test_validation='950,49,1',
num_proc=1,
cache=True):
self.data_files = Path(data_files)
if save_path:
self.save_path = Path(save_path)
else:
self.save_path = self.file_check(
Path(self.data_files.parent, self.data_files.name+'_FSDataset'),
'save')
self.num_proc = num_proc
self.cache = cache
self.split_idx = self.split_train_test_validation_index(train_test_validation)
if cache:
self.cache_path = self.file_check(
Path(self.save_path.parent, 'FSDataCache', self.data_files.name), 'cache')
else:
self.cache_path = None
@staticmethod
def file_check(path, path_type):
print(path)
if not path.exists():
path.mkdir(parents=True)
print(f"Since no {path_type} directory is specified, the program will automatically create it in {path} directory.")
return str(path)
@staticmethod
def split_train_test_validation_index(train_test_validation):
split_idx_ = [int(i) for i in train_test_validation.split(',')]
idx_dict = {
'train_rate': split_idx_[0]/sum(split_idx_),
'test_rate': split_idx_[1]/sum(split_idx_[1:])
}
return idx_dict
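    # process: load one JSON shard, split it into train/test/validation according to the
    # configured ratios, and save the resulting DatasetDict to disk.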
def process(self, index, path):
print('saving dataset shard {}'.format(index))
ds = (datasets.load_dataset('json', data_files=str(path),
cache_dir=self.cache_path,
features=None))
# ds = ds.map(self.cut_sent,input_columns='text')
# print(d)
# print('!!!',ds)
ds = ds['train'].train_test_split(train_size=self.split_idx['train_rate'])
ds_ = ds['test'].train_test_split(train_size=self.split_idx['test_rate'])
ds = datasets.DatasetDict({
'train': ds['train'],
'test': ds_['train'],
'validation': ds_['test']
})
# print('!!!!',ds)
ds.save_to_disk(Path(self.save_path, path.name))
return 'saving dataset shard {} done'.format(index)
def generate_cache_arrow(self) -> None:
'''
        Generate the Arrow cache files supported by HF datasets to speed up subsequent loading.
'''
data_dict_paths = self.data_files.rglob('*')
p = ProcessPoolExecutor(max_workers=self.num_proc)
res = list()
for index, path in enumerate(data_dict_paths):
res.append(p.submit(self.process, index, path))
p.shutdown(wait=True)
for future in res:
print(future.result(), flush=True)
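# Load every cached DatasetDict shard in parallel and concatenate the train/test/validation
# splits into one DatasetDict.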
def load_dataset(num_proc=4, **kwargs):
cache_dict_paths = Path(_CACHE_SPLIT_DATA_PATH).glob('*')
ds = []
res = []
p = ProcessPoolExecutor(max_workers=num_proc)
for path in cache_dict_paths:
res.append(p.submit(datasets.load_from_disk,
                            str(path), **kwargs))
p.shutdown(wait=True)
for future in res:
ds.append(future.result())
# print(future.result())
train = []
test = []
validation = []
for ds_ in ds:
train.append(ds_['train'])
test.append(ds_['test'])
validation.append(ds_['validation'])
# ds = datasets.concatenate_datasets(ds)
# print(ds)
return datasets.DatasetDict({
'train': datasets.concatenate_datasets(train),
'test': datasets.concatenate_datasets(test),
'validation': datasets.concatenate_datasets(validation)
})
class BertDataModule(LightningDataModule):
@ staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('Universal DataModule')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_batchsize', default=32, type=int)
parser.add_argument('--val_batchsize', default=32, type=int)
parser.add_argument('--test_batchsize', default=32, type=int)
parser.add_argument('--datasets_name', type=str)
# parser.add_argument('--datasets_name', type=str)
parser.add_argument('--train_datasets_field', type=str, default='train')
parser.add_argument('--val_datasets_field', type=str, default='validation')
parser.add_argument('--test_datasets_field', type=str, default='test')
return parent_args
def __init__(
self,
tokenizer,
collate_fn,
args,
**kwargs,
):
super().__init__()
self.datasets = load_dataset(num_proc=args.num_workers)
self.tokenizer = tokenizer
self.collate_fn = collate_fn
self.save_hyperparameters(args)
def setup(self, stage: Optional[str] = None) -> None:
self.train = DataLoader(
self.datasets[self.hparams.train_datasets_field],
batch_size=self.hparams.train_batchsize,
shuffle=True,
num_workers=self.hparams.num_workers,
collate_fn=self.collate_fn,
)
self.val = DataLoader(
self.datasets[self.hparams.val_datasets_field],
batch_size=self.hparams.val_batchsize,
shuffle=False,
num_workers=self.hparams.num_workers,
collate_fn=self.collate_fn,
)
self.test = DataLoader(
self.datasets[self.hparams.test_datasets_field],
batch_size=self.hparams.test_batchsize,
shuffle=False,
num_workers=self.hparams.num_workers,
collate_fn=self.collate_fn,
)
return
def train_dataloader(self):
return self.train
def val_dataloader(self):
return self.val
def test_dataloader(self):
return self.test
if __name__ == '__main__':
# pre = PreProcessing(_SPLIT_DATA_PATH)
# pre.processing()
dataset = BertDataGenerate(_SPLIT_DATA_PATH, num_proc=16)
dataset.generate_cache_arrow()
| 6,756 | 32.616915 | 124 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/base.py
|
_CONFIG_MODEL_TYPE = 'fengshen_model_type'
_CONFIG_TOKENIZER_TYPE = 'fengshen_tokenizer_type'
| 94 | 30.666667 | 50 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/test.py
|
from fengshen.pipelines.text_classification import TextClassificationPipeline
import argparse
from datasets import load_dataset
# Prediction, with batch input support
# pipe = TextClassificationPipeline(
# model='/data/gaoxinyu/pretrained_model/deberta-base-sp', device=-1)
# print(pipe(['今天心情不好</s>今天很开心', '今天心情很好</s>今天很开心']))
# Training, with full hyperparameter control
total_parser = argparse.ArgumentParser("test")
total_parser = TextClassificationPipeline.add_pipeline_specific_args(total_parser)
args = total_parser.parse_args()
args.gpus=2
datasets = load_dataset('IDEA-CCNL/AFQMC')
pipe = TextClassificationPipeline(
args=args,
model='/cognitive_comp/lujunyu/XinYu/Fengshenbang-LM/fengshen/workspace/bert-base/pretrain', device=-1)
pipe.train(datasets)
| 717 | 33.190476 | 107 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/test_tagging.py
|
from fengshen.pipelines.sequence_tagging import SequenceTaggingPipeline
import argparse
import os
total_parser = argparse.ArgumentParser("test")
total_parser = SequenceTaggingPipeline.add_pipeline_specific_args(total_parser)
args = total_parser.parse_args()
args.data_dir="/cognitive_comp/lujunyu/data_zh/NER_Aligned/weibo"
args.gpus=2
args.max_epochs=30
args.decode_type='linear'
args.learning_rate=3e-5
args.strategy="deepspeed_stage_1"
os.environ["CUDA_VISIBLE_DEVICES"]="5,6"
# pipe = SequenceTaggingPipeline(
# model_path='/cognitive_comp/lujunyu/NER/outputs/ccks_crf/bert/best_checkpoint', args=args)
# print(pipe('李开复的哥哥在中国共产党读书。'))
pipe = SequenceTaggingPipeline(
model_path='/cognitive_comp/lujunyu/XinYu/Fengshenbang-LM/fengshen/workspace/bert-base/pretrain', args=args)
pipe.train()
| 805 | 34.043478 | 112 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/sequence_tagging.py
|
import torch
import torch.nn.functional as F
from torch.utils.data._utils.collate import default_collate
from dataclasses import dataclass
from typing import Dict, List, Union
from fengshen.models.tagging_models.bert_for_tagging import BertLinear,BertCrf,BertSpan,BertBiaffine
from fengshen.data.sequence_tagging_dataloader.sequence_tagging_collator import CollatorForLinear, CollatorForCrf, CollatorForSpan, CollatorForBiaffine
from fengshen.data.sequence_tagging_dataloader.sequence_tagging_datasets import DataProcessor, get_datasets
from fengshen.metric.metric import EntityScore
from fengshen.metric.utils_ner import get_entities, bert_extract_item
from transformers import (
BertConfig,
AutoTokenizer, BertTokenizer
)
from transformers.models.auto.tokenization_auto import get_tokenizer_config
from transformers.pipelines.base import PipelineException, GenericTensor
from transformers import TokenClassificationPipeline as HuggingfacePipe
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.models.model_utils import add_module_args
from fengshen.models.model_utils import configure_optimizers
from fengshen.models.model_utils import get_total_steps
_model_dict={
'bert-linear': BertLinear,
'bert-crf': BertCrf,
'bert-span': BertSpan,
'bert-biaffine': BertBiaffine
}
_collator_dict={
'linear': CollatorForLinear,
'crf': CollatorForCrf,
'span': CollatorForSpan,
'biaffine': CollatorForBiaffine
}
class _taskModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('sequence tagging task model')
parser.add_argument('--data_dir', default=None, type=str)
parser.add_argument('--model_type', default='bert', type=str)
parser.add_argument("--decode_type", default="linear", choices=["linear", "crf", "biaffine", "span"], type=str)
parser.add_argument('--loss_type', default='ce', type=str)
return parent_args
def __init__(self, args, model, label2id, validate_fn):
super().__init__()
self.label2id = label2id
self.id2label = {v: k for k, v in self.label2id.items()}
self.model=model
self.validate_fn=getattr(self,validate_fn)
self.entity_score=EntityScore()
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
self.validate_fn(batch,batch_idx)
def validation_linear(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.logits
preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
labels = batch['labels'].detach().cpu().numpy()
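        # Walk each sequence token by token: skip position 0 ([CLS]) and stop at the last
        # non-padded position; the collected tag sequences are then scored as entity spans.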
for i, label in enumerate(labels):
y_true = []
y_pred = []
for j, m in enumerate(label):
if j == 0:
continue
elif j == (torch.sum(batch['attention_mask'][i]).item()-1):
true_subject=get_entities(y_true,self.id2label)
pred_subject=get_entities(y_pred,self.id2label)
self.entity_score.update(true_subject=true_subject, pred_subject=pred_subject)
break
else:
y_true.append(self.id2label[labels[i][j]])
y_pred.append(self.id2label[preds[i][j]])
self.log('val_loss', loss)
def validation_crf(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.logits
preds = self.model.crf.decode(logits, batch['attention_mask'])
preds = preds.detach().squeeze(0).cpu().numpy().tolist()
labels = batch['labels'].detach().cpu().numpy()
for i, label in enumerate(labels):
y_true = []
y_pred = []
for j, m in enumerate(label):
if j == 0:
continue
elif j == (torch.sum(batch['attention_mask'][i]).item()-1):
true_subject=get_entities(y_true,self.id2label)
pred_subject=get_entities(y_pred,self.id2label)
self.entity_score.update(true_subject=true_subject, pred_subject=pred_subject)
break
else:
y_true.append(self.id2label[labels[i][j]])
y_pred.append(self.id2label[preds[i][j]])
self.log('val_loss', loss)
def validation_span(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
start_logits = outputs.start_logits
end_logits = outputs.end_logits
labels=batch['subjects']
for i, T in enumerate(labels):
active_start_logits=start_logits[i][:batch['input_len'][i]]
active_end_logits=end_logits[i][:batch['input_len'][i]]
R = bert_extract_item(active_start_logits, active_end_logits)
T=T[~torch.all(T==-1,dim=-1)].cpu().numpy()
T=list(map(lambda x:(self.id2label[x[0]],x[1],x[2]),T))
R=list(map(lambda x:(self.id2label[x[0]],x[1],x[2]),R))
self.entity_score.update(true_subject=T, pred_subject=R)
self.log('val_loss', loss)
def validation_biaffine(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.span_logits
        preds = torch.argmax(logits, dim=-1).detach().cpu().numpy()
labels = batch['span_labels'].cpu().numpy()
for i, label in enumerate(labels):
input_len=(batch['input_len'][i])-2
active_label=labels[i,1:input_len+1,1:input_len+1]
active_pred=preds[i,1:input_len+1,1:input_len+1]
temp_1 = []
temp_2 = []
for j in range(input_len):
for k in range(input_len):
if self.id2label[active_label[j,k]]!="O":
temp_1.append([self.id2label[active_label[j,k]],j,k])
if self.id2label[active_pred[j,k]]!="O":
temp_2.append([self.id2label[active_pred[j,k]],j,k])
self.entity_score.update(pred_subject=temp_2, true_subject=temp_1)
self.log('val_loss', loss)
def validation_epoch_end(self, outputs):
        # compute the metric across all processes
score_dict, _ = self.entity_score.result()
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('score_dict:\n', score_dict)
        # reset the metric after each validation run
self.entity_score.reset()
for k, v in score_dict.items():
self.log('val_{}'.format(k), v)
def configure_optimizers(self):
return configure_optimizers(self)
class SequenceTaggingPipeline(HuggingfacePipe):
@staticmethod
def add_pipeline_specific_args(parent_args):
parser = parent_args.add_argument_group('SequenceTaggingPipeline')
parser.add_argument("--max_seq_length", default=512, type=int)
parser = _taskModel.add_model_specific_args(parent_args)
parser = UniversalDataModule.add_data_specific_args(parent_args)
parser = UniversalCheckpoint.add_argparse_args(parent_args)
parser = pl.Trainer.add_argparse_args(parent_args)
parser = add_module_args(parent_args)
return parent_args
def __init__(self,
model_path: str = None,
args=None,
**kwargs):
_validation_dict={
'linear': 'validation_linear',
'crf': 'validation_crf',
'span': 'validation_span',
'biaffine': 'validation_biaffine',
}
_prediction_dict={
'linear': 'postprocess_linear',
'crf': 'postprocess_crf',
'span': 'postprocess_span',
'biaffine': 'postprocess_biaffine',
}
self.args = args
self.model_name=args.model_type+"-"+args.decode_type
self.label2id,self.id2label=DataProcessor.get_labels(args)
self.config=BertConfig.from_pretrained(model_path)
self.model = _model_dict[self.model_name].from_pretrained(model_path, config=self.config, num_labels=len(self.label2id), loss_type=args.loss_type)
self.tokenizer=BertTokenizer.from_pretrained(model_path)
self.validate_fn = _validation_dict[args.decode_type]
self.predict_fn = getattr(self,_prediction_dict[args.decode_type])
self.collator = _collator_dict[args.decode_type]()
self.collator.args=self.args
self.collator.tokenizer=self.tokenizer
self.collator.label2id=self.label2id
device=-1
super().__init__(model=self.model,
tokenizer=self.tokenizer,
framework='pt',
device=device,
**kwargs)
def check_model_type(self, supported_models: Union[List[str], dict]):
pass
def train(self):
datasets=get_datasets(self.args)
checkpoint_callback = UniversalCheckpoint(self.args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(self.args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = UniversalDataModule(
datasets=datasets,
args=self.args,
collate_fn=self.collator,
tokenizer=self.tokenizer)
model = _taskModel(self.args,self.model,self.label2id,self.validate_fn)
trainer.fit(model,data_model)
def _forward(self, model_inputs):
outputs = self.model(**model_inputs)
return (model_inputs,outputs)
def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
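        # The raw string is split into characters and paired with dummy 'O' labels so the
        # training collator can be reused to build model inputs at inference time.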
samples=[]
labels,subject=["O" for _ in range(len(inputs))],[]
samples.append({"text_a": list(inputs), "labels": labels, "subject":subject})
return self.collator(samples)
def postprocess(self, model_outputs):
return self.predict_fn(model_outputs)
def postprocess_linear(self, model_outputs):
model_inputs,outputs=model_outputs
preds = torch.argmax(F.log_softmax(outputs.logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
text = self.tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0])[:model_inputs['input_len'][0]][1:-1]
pred = preds[0][:model_inputs['input_len'][0]][1:-1]
label_entities = get_entities(pred, self.id2label)
for label_list in label_entities:
label_list.append("".join(text[label_list[1]:label_list[2]+1]))
return label_entities
def postprocess_crf(self, model_outputs):
model_inputs,outputs=model_outputs
preds = self.model.crf.decode(outputs.logits, model_inputs['attention_mask']).squeeze(0).cpu().numpy().tolist()
text = self.tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0])[:model_inputs['input_len'][0]][1:-1]
pred = preds[0][:model_inputs['input_len'][0]][1:-1]
label_entities = get_entities(pred, self.id2label)
for label_list in label_entities:
label_list.append("".join(text[label_list[1]:label_list[2]+1]))
return label_entities
def postprocess_span(self, model_outputs):
model_inputs,outputs=model_outputs
start_logits, end_logits = outputs.start_logits[0], outputs.end_logits[0]
text = self.tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0])[:model_inputs['input_len'][0]][1:-1]
R = bert_extract_item(start_logits[:model_inputs['input_len'][0]], end_logits[:model_inputs['input_len'][0]])
label_entities = [[self.id2label[x[0]],x[1],x[2],"".join(text[x[1]:x[2]+1])] for x in R]
return label_entities
Pipeline = SequenceTaggingPipeline
| 12,608 | 39.156051 | 154 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/text_classification.py
|
import torch
from torch.utils.data._utils.collate import default_collate
from dataclasses import dataclass
from typing import Dict, List
from .base import (
_CONFIG_MODEL_TYPE,
_CONFIG_TOKENIZER_TYPE)
from fengshen.models.roformer import RoFormerForSequenceClassification
from fengshen.models.longformer import LongformerForSequenceClassification
from fengshen.models.zen1 import ZenForSequenceClassification
from transformers import (
BertConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
)
from transformers.models.auto.tokenization_auto import get_tokenizer_config
from transformers.pipelines.base import PipelineException, GenericTensor
from transformers import TextClassificationPipeline as HuggingfacePipe
import pytorch_lightning as pl
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.models.model_utils import add_module_args
import torchmetrics
_model_dict = {
'fengshen-roformer': RoFormerForSequenceClassification,
    # 'fengshen-megatron_t5': T5EncoderModel, TODO: implement T5EncoderForSequenceClassification
'fengshen-longformer': LongformerForSequenceClassification,
'fengshen-zen1': ZenForSequenceClassification,
'huggingface-auto': AutoModelForSequenceClassification,
}
_tokenizer_dict = {}
_ATTR_PREPARE_INPUT = '_prepare_inputs_for_sequence_classification'
class _taskModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
_ = parent_args.add_argument_group('text classification task model')
return parent_args
def __init__(self, args, model):
super().__init__()
self.model = model
self.acc_metrics = torchmetrics.Accuracy()
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss, _ = outputs[0], outputs[1]
self.log('train_loss', loss)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).long()
acc = self.acc_metrics(y_pred.long(), y_true.long())
return acc
def validation_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss, logits = outputs[0], outputs[1]
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc)
def predict_step(self, batch, batch_idx):
output = self.model(**batch)
return output.logits
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
@dataclass
class _Collator:
tokenizer = None
texta_name = 'sentence'
textb_name = 'sentence2'
label_name = 'label'
max_length = 512
model_type = 'huggingface-auto'
def __call__(self, samples):
sample_list = []
for item in samples:
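            # For sentence pairs, most models get both sentences passed to encode_plus together,
            # while the RoFormer variant expects a single string joined with [SEP]; single
            # sentences are encoded on their own below.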
if self.textb_name in item and item[self.textb_name] != '':
if self.model_type != 'fengshen-roformer':
encode_dict = self.tokenizer.encode_plus(
[item[self.texta_name], item[self.textb_name]],
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
[item[self.texta_name]+'[SEP]'+item[self.textb_name]],
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
item[self.texta_name],
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
sample = {}
for k, v in encode_dict.items():
sample[k] = torch.tensor(v)
if self.label_name in item:
sample['labels'] = torch.tensor(item[self.label_name]).long()
sample_list.append(sample)
return default_collate(sample_list)
class TextClassificationPipeline(HuggingfacePipe):
@staticmethod
def add_pipeline_specific_args(parent_args):
parser = parent_args.add_argument_group('SequenceClassificationPipeline')
parser.add_argument('--texta_name', default='sentence', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--max_length', default=512, type=int)
parser.add_argument('--device', default=-1, type=int)
parser = _taskModel.add_model_specific_args(parent_args)
parser = UniversalDataModule.add_data_specific_args(parent_args)
parser = UniversalCheckpoint.add_argparse_args(parent_args)
parser = pl.Trainer.add_argparse_args(parent_args)
parser = add_module_args(parent_args)
return parent_args
def __init__(self,
model: str = None,
args=None,
**kwargs):
self.args = args
self.model_name = model
self.model_type = 'huggingface-auto'
        # Use BertConfig purely for compatibility: we only need to read fengshen_model_type from it, so any Config class would do here
config = BertConfig.from_pretrained(model)
if hasattr(config, _CONFIG_MODEL_TYPE):
self.model_type = config.fengshen_model_type
if self.model_type not in _model_dict:
raise PipelineException(self.model_name, ' not in model type dict')
        # Load the model and reuse its own config
self.model = _model_dict[self.model_type].from_pretrained(model)
self.config = self.model.config
        # Load the tokenizer
tokenizer_config = get_tokenizer_config(model, **kwargs)
self.tokenizer = None
        # get_tokenizer_config returns a dict, so check for the key instead of an attribute
        if _CONFIG_TOKENIZER_TYPE in tokenizer_config:
            if tokenizer_config[_CONFIG_TOKENIZER_TYPE] in _tokenizer_dict:
                self.tokenizer = _tokenizer_dict[tokenizer_config[_CONFIG_TOKENIZER_TYPE]].from_pretrained(
                    model)
if self.tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(model)
        # Set up the data collator
c = _Collator()
c.tokenizer = self.tokenizer
c.model_type = self.model_type
if args is not None:
c.texta_name = self.args.texta_name
c.textb_name = self.args.textb_name
c.label_name = self.args.label_name
c.max_length = self.args.max_length
self.collator = c
device = -1 if args is None else args.device
print(device)
print(kwargs)
super().__init__(model=self.model,
tokenizer=self.tokenizer,
framework='pt',
device=device,
**kwargs)
def train(self,
datasets: Dict):
"""
Args:
datasets is a dict like
{
test: Dataset()
validation: Dataset()
train: Dataset()
}
"""
checkpoint_callback = UniversalCheckpoint(self.args)
trainer = pl.Trainer.from_argparse_args(self.args,
callbacks=[checkpoint_callback]
)
data_model = UniversalDataModule(
datasets=datasets,
tokenizer=self.tokenizer,
collate_fn=self.collator,
args=self.args)
model = _taskModel(self.args, self.model)
trainer.fit(model, data_model)
return
def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        # If the model exposes its own preprocessing hook, use it
if hasattr(self.model, _ATTR_PREPARE_INPUT):
return getattr(self.model, _ATTR_PREPARE_INPUT)(inputs, self.tokenizer, **tokenizer_kwargs)
samples = []
if isinstance(inputs, str):
samples.append({self.collator.texta_name: inputs})
else:
            # __call__ has already validated the input type, so a plain else is sufficient here
            for i in inputs:
                samples.append({self.collator.texta_name: i})
return self.collator(samples)
Pipeline = TextClassificationPipeline
| 9,274 | 38.468085 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/tcbert.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning import trainer, loggers
from transformers import AutoConfig
from transformers.pipelines.base import Pipeline
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from fengshen.models.tcbert.modeling_tcbert import (
TCBertDataModel,
TCBertLitModel,
TCBertPredict,
)
class TCBertPipelines(Pipeline):
@staticmethod
def piplines_args(parent_args):
total_parser = parent_args.add_argument_group("piplines args")
total_parser.add_argument(
'--pretrained_model_path', default='', type=str)
total_parser.add_argument('--load_checkpoints_path',
default='', type=str)
total_parser.add_argument('--train', action='store_true')
total_parser.add_argument('--language',
default='chinese', type=str)
total_parser = TCBertDataModel.add_data_specific_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = TCBertLitModel.add_model_specific_args(total_parser)
total_parser = pl.Trainer.add_argparse_args(parent_args)
return parent_args
def __init__(self, args, model_path, nlabels):
self.args = args
self.checkpoint_callback = UniversalCheckpoint(args)
self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
self.trainer = pl.Trainer.from_argparse_args(args,
logger=self.logger,
callbacks=[self.checkpoint_callback])
self.config = AutoConfig.from_pretrained(model_path)
self.tokenizer = BertTokenizer.from_pretrained(
model_path)
if args.load_checkpoints_path != '':
self.model = TCBertLitModel.load_from_checkpoint(
args.load_checkpoints_path, args=args, model_path=model_path, nlabels=nlabels)
print('load model from: ', args.load_checkpoints_path)
else:
self.model = TCBertLitModel(
args, model_path=model_path, nlabels=nlabels)
def train(self, train_data, dev_data, prompt, prompt_label):
data_model = TCBertDataModel(
train_data, dev_data, self.tokenizer, self.args, prompt, prompt_label)
self.model.num_data = len(train_data)
self.trainer.fit(self.model, data_model)
def predict(self, test_data, prompt, prompt_label, cuda=True):
result = []
start = 0
if cuda:
self.model = self.model.cuda()
self.model.model.eval()
predict_model = TCBertPredict(self.model, self.tokenizer, self.args, prompt, prompt_label)
while start < len(test_data):
batch_data = test_data[start:start+self.args.batchsize]
start += self.args.batchsize
batch_result = predict_model.predict(batch_data)
result.extend(batch_result)
# result = self.postprocess(result)
return result
def preprocess(self, data):
return data
def postprocess(self, data):
return data
def _forward(self, model_inputs):
return self.model(**model_inputs)
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if isinstance(top_k, int) or top_k is None:
postprocess_params["top_k"] = top_k
postprocess_params["_legacy"] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
UserWarning,
)
if return_all_scores:
postprocess_params["top_k"] = None
else:
postprocess_params["top_k"] = 1
if function_to_apply is not None:
postprocess_params["function_to_apply"] = function_to_apply
return preprocess_params, {}, postprocess_params
| 5,390 | 38.350365 | 116 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/multiplechoice.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning import trainer, loggers
from transformers import AlbertTokenizer
from transformers import AutoConfig
from transformers.pipelines.base import Pipeline
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from fengshen.models.unimc.modeling_unimc import (
UniMCDataModel,
UniMCLitModel,
UniMCPredict,
)
class UniMCPipelines(Pipeline):
@staticmethod
def pipelines_args(parent_args):
total_parser = parent_args.add_argument_group("piplines args")
total_parser.add_argument(
'--pretrained_model_path', default='', type=str)
total_parser.add_argument('--load_checkpoints_path',
default='', type=str)
total_parser.add_argument('--train', action='store_true')
total_parser.add_argument('--language',
default='chinese', type=str)
total_parser = UniMCDataModel.add_data_specific_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = UniMCLitModel.add_model_specific_args(total_parser)
total_parser = pl.Trainer.add_argparse_args(parent_args)
return parent_args
def __init__(self, args, model_path):
self.args = args
self.checkpoint_callback = UniversalCheckpoint(args)
self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
self.trainer = pl.Trainer.from_argparse_args(args,
logger=self.logger,
callbacks=[self.checkpoint_callback])
self.config = AutoConfig.from_pretrained(model_path)
if self.config.model_type == 'albert':
self.tokenizer = AlbertTokenizer.from_pretrained(
model_path)
else:
self.tokenizer = BertTokenizer.from_pretrained(
model_path)
if args.language == 'chinese':
self.yes_token = self.tokenizer.encode('是')[1]
self.no_token = self.tokenizer.encode('非')[1]
else:
self.yes_token = self.tokenizer.encode('yes')[1]
self.no_token = self.tokenizer.encode('no')[1]
if args.load_checkpoints_path != '':
self.model = UniMCLitModel.load_from_checkpoint(
args.load_checkpoints_path, args=args, yes_token=self.yes_token, model_path=model_path)
print('load model from: ', args.load_checkpoints_path)
else:
self.model = UniMCLitModel(
args, yes_token=self.yes_token, model_path=model_path)
def train(self, train_data, dev_data, process=True):
if process:
train_data = self.preprocess(train_data)
dev_data = self.preprocess(dev_data)
data_model = UniMCDataModel(
train_data, dev_data, self.yes_token, self.no_token, self.tokenizer, self.args)
self.model.num_data = len(train_data)
self.trainer.fit(self.model, data_model)
def predict(self, test_data, cuda=True, process=True):
if process:
test_data = self.preprocess(test_data)
result = []
start = 0
if cuda:
self.model = self.model.cuda()
self.model.model.eval()
predict_model = UniMCPredict(
self.yes_token, self.no_token, self.model, self.tokenizer, self.args)
while start < len(test_data):
batch_data = test_data[start:start+self.args.batchsize]
start += self.args.batchsize
batch_result = predict_model.predict(batch_data)
result.extend(batch_result)
if process:
result = self.postprocess(result)
return result
def preprocess(self, data):
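        # Rewrite semantic-matching ('语义匹配') and NLI ('自然语言推理') samples into the unified
        # multiple-choice format by turning textb into templated answer options.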
for i, line in enumerate(data):
if 'task_type' in line.keys() and line['task_type'] == '语义匹配':
data[i]['choice'] = ['不能理解为:'+data[i]
['textb'], '可以理解为:'+data[i]['textb']]
# data[i]['question']='怎么理解这段话?'
data[i]['textb'] = ''
if 'task_type' in line.keys() and line['task_type'] == '自然语言推理':
data[i]['choice'] = ['不能推断出:'+data[i]['textb'],
'很难推断出:'+data[i]['textb'], '可以推断出:'+data[i]['textb']]
# data[i]['question']='根据这段话'
data[i]['textb'] = ''
return data
def postprocess(self, data):
for i, line in enumerate(data):
if 'task_type' in line.keys() and line['task_type'] == '语义匹配':
data[i]['textb'] = data[i]['choice'][0].replace('不能理解为:', '')
data[i]['choice'] = ['不相似', '相似']
ns = {}
for k, v in data[i]['score'].items():
if '不能' in k:
k = '不相似'
if '可以' in k:
k = '相似'
ns[k] = v
data[i]['score'] = ns
data[i]['answer'] = data[i]['choice'][data[i]['label']]
if 'task_type' in line.keys() and line['task_type'] == '自然语言推理':
data[i]['textb'] = data[i]['choice'][0].replace('不能推断出:', '')
data[i]['choice'] = ['矛盾', '自然', '蕴含']
ns = {}
for k, v in data[i]['score'].items():
if '不能' in k:
k = '矛盾'
if '很难' in k:
k = '自然'
if '可以' in k:
k = '蕴含'
ns[k] = v
data[i]['score'] = ns
data[i]['answer'] = data[i]['choice'][data[i]['label']]
return data
def _forward(self, model_inputs):
return self.model(**model_inputs)
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if isinstance(top_k, int) or top_k is None:
postprocess_params["top_k"] = top_k
postprocess_params["_legacy"] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
UserWarning,
)
if return_all_scores:
postprocess_params["top_k"] = None
else:
postprocess_params["top_k"] = 1
if function_to_apply is not None:
postprocess_params["function_to_apply"] = function_to_apply
return preprocess_params, {}, postprocess_params
| 7,967 | 39.653061 | 116 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/pipelines/information_extraction.py
|
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning import trainer, loggers
from transformers import AlbertTokenizer
from transformers import AutoConfig,AutoTokenizer
from transformers.pipelines.base import Pipeline
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from fengshen.models.uniex.modeling_uniex import (
UniEXDataModel,
TaskModelCheckpoint,
UniEXLitModel,
FastExtractModel,
ExtractModel
)
class UniEXPipelines:
@staticmethod
def pipelines_args(parent_args):
total_parser = parent_args.add_argument_group("piplines args")
total_parser.add_argument(
'--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_path',
default='./predict.json', type=str)
total_parser.add_argument('--load_checkpoints_path',
default='', type=str)
total_parser.add_argument('--max_extract_entity_number',
default=1, type=float)
total_parser.add_argument('--train', action='store_true')
total_parser.add_argument('--fast_ex_mode', action='store_true')
total_parser.add_argument('--threshold_index',
default=0.5, type=float)
total_parser.add_argument('--threshold_entity',
default=0.5, type=float)
total_parser.add_argument('--threshold_event',
default=0.5, type=float)
total_parser.add_argument('--threshold_relation',
default=0.5, type=float)
total_parser = UniEXDataModel.add_data_specific_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
total_parser = UniEXLitModel.add_model_specific_args(total_parser)
total_parser = pl.Trainer.add_argparse_args(parent_args)
return parent_args
def __init__(self, args):
if args.load_checkpoints_path != '':
self.model = UniEXLitModel.load_from_checkpoint(
args.load_checkpoints_path, args=args)
            print('load model from: ', args.load_checkpoints_path)
else:
self.model = UniEXLitModel(args)
self.args = args
self.checkpoint_callback = TaskModelCheckpoint(args).callbacks
self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
self.trainer = pl.Trainer.from_argparse_args(args,
logger=self.logger,
callbacks=[self.checkpoint_callback])
added_token = ['[unused'+str(i+1)+']' for i in range(10)]
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path, is_split_into_words=True, add_prefix_space=True, additional_special_tokens=added_token)
if args.fast_ex_mode:
self.em = FastExtractModel(self.tokenizer, args)
else:
self.em = ExtractModel(self.tokenizer, args)
def fit(self, train_data, dev_data,test_data=[]):
data_model = UniEXDataModel(
train_data, dev_data, self.tokenizer, self.args)
self.model.num_data = len(train_data)
self.model.dev_data = dev_data
self.model.test_data = test_data
self.trainer.fit(self.model, data_model)
def predict(self, test_data, cuda=True):
result = []
start = 0
if cuda:
self.model = self.model.cuda()
self.model.eval()
while start < len(test_data):
batch_data = test_data[start:start+self.args.batchsize]
start += self.args.batchsize
batch_result = self.em.extract(
batch_data, self.model.model)
result.extend(batch_result)
return result
| 4,151 | 36.071429 | 127 |
py
|
TFusion
|
TFusion-master/__init__.py
| 1 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/pre_process/market_answer_predict.py
|
from profile.fusion_param import ctrl_msg, get_fusion_param
from train.st_filter import train_tracks
from util.file_helper import read_lines
import numpy as np
from util.serialize import pickle_save
def save_market_train_truth():
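    # Collect, for every camera pair, the frame-time deltas between training tracks that share
    # the same person id (within the same sequence), i.e. the ground-truth spatio-temporal
    # transfer distribution, and pickle the sorted deltas.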
ctrl_msg['data_folder_path'] = 'market_market-train'
fusion_param = get_fusion_param()
market_train_tracks = train_tracks(fusion_param)
deltas = [[list() for j in range(6)] for i in range(6)]
for i, market_train_track in enumerate(market_train_tracks):
for j in range(max(0, i - 50), min(i + 60, len(market_train_tracks))):
if market_train_tracks[i][0] == market_train_tracks[j][0] \
and i != j \
and market_train_tracks[i][3] == market_train_tracks[j][3] \
and market_train_tracks[i][1] != market_train_tracks[j][1]:
deltas[market_train_tracks[i][1] - 1][market_train_tracks[j][1] - 1].append(
market_train_tracks[i][2] - market_train_tracks[j][2]
)
for camera_delta in deltas:
for delta_s in camera_delta:
delta_s.sort()
pickle_save('true_market_train.pck', deltas)
def save_market_test_truth():
ctrl_msg['data_folder_path'] = 'market_market-test'
fusion_param = get_fusion_param()
answer_path = fusion_param['answer_path']
answer_lines = read_lines(answer_path)
query_tracks = list()
for answer in answer_lines:
info = answer.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
query_tracks.append([info[0], int(info[1][0]), int(info[2])])
else:
query_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
gallery_path = fusion_param['gallery_path']
gallery_lines = read_lines(gallery_path)
gallery_tracks = list()
for gallery in gallery_lines:
info = gallery.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
gallery_tracks.append([info[0], int(info[1][0]), int(info[2])])
else:
gallery_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
gallery_tracks.extend(query_tracks)
print(len(gallery_tracks))
deltas = [[list() for j in range(6)] for i in range(6)]
for i, market_probe_track in enumerate(gallery_tracks):
if gallery_tracks[i][0] == 0 or gallery_tracks[i][0] == -1:
continue
for j in range(len(gallery_tracks)):
if gallery_tracks[i][0] == gallery_tracks[j][0] \
and i != j \
and gallery_tracks[i][3] == gallery_tracks[j][3] \
and gallery_tracks[i][1] != gallery_tracks[j][1]:
if gallery_tracks[i][1] == 4 and gallery_tracks[j][1] - 1 == 5:
if j >= 19732:
                        print(gallery_tracks[i][2] - gallery_tracks[j][2])
deltas[gallery_tracks[i][1] - 1][gallery_tracks[j][1] - 1].append(
gallery_tracks[i][2] - gallery_tracks[j][2]
)
for camera_delta in deltas:
for delta_s in camera_delta:
delta_s.sort()
pickle_save('true_market_pg.pck', deltas)
def save_market_probe_truth():
ctrl_msg['data_folder_path'] = 'market_market-test'
fusion_param = get_fusion_param()
answer_path = fusion_param['answer_path']
answer_lines = read_lines(answer_path)
query_tracks = list()
for answer in answer_lines:
info = answer.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
query_tracks.append([info[0], int(info[1][0]), int(info[2])])
else:
query_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
gallery_path = fusion_param['gallery_path']
gallery_lines = read_lines(gallery_path)
gallery_tracks = list()
for gallery in gallery_lines:
info = gallery.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
gallery_tracks.append([info[0], int(info[1][0]), int(info[2])])
else:
gallery_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
deltas = [[list() for j in range(6)] for i in range(6)]
for i, market_probe_track in enumerate(query_tracks):
if query_tracks[i][0] == 0 or gallery_tracks[i][0] == -1:
continue
for j in range(len(gallery_tracks)):
if query_tracks[i][0] == gallery_tracks[j][0] \
and i != j \
and query_tracks[i][3] == gallery_tracks[j][3] \
and query_tracks[i][1] != gallery_tracks[j][1]:
deltas[query_tracks[i][1] - 1][gallery_tracks[j][1] - 1].append(
query_tracks[i][2] - gallery_tracks[j][2]
)
for camera_delta in deltas:
for delta_s in camera_delta:
delta_s.sort()
pickle_save('true_market_probe.pck', deltas)
def save_market_img_list(img_list_path, dest_path):
answer_lines = read_lines(img_list_path)
query_tracks = list()
for i, answer in enumerate(answer_lines):
info = answer.split('_')
query_tracks.append([i, info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
query_tracks = np.array(query_tracks).astype(int)
np.savetxt(dest_path, query_tracks, fmt='%d', delimiter='\t')
def save_grid_train_truth():
ctrl_msg['data_folder_path'] = 'market_grid-cv0-train'
fusion_param = get_fusion_param()
market_train_tracks = train_tracks(fusion_param)
deltas = [[list() for j in range(6)] for i in range(6)]
for i, market_train_track in enumerate(market_train_tracks):
for j in range(0, len(market_train_tracks)):
if market_train_tracks[i][0] == market_train_tracks[j][0] \
and i != j \
and market_train_tracks[i][1] != market_train_tracks[j][1]:
deltas[market_train_tracks[i][1] - 1][market_train_tracks[j][1] - 1].append(
market_train_tracks[i][2] - market_train_tracks[j][2]
)
for camera_delta in deltas:
for delta_s in camera_delta:
delta_s.sort()
pickle_save('true_grid-cv0_train.pck', deltas)
def save_grid_test_truth():
ctrl_msg['data_folder_path'] = 'market_grid-cv0-test'
fusion_param = get_fusion_param()
answer_path = fusion_param['answer_path']
answer_lines = read_lines(answer_path)
query_tracks = list()
for answer in answer_lines:
info = answer.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
query_tracks.append([info[0], int(info[1][0]), int(info[2])])
else:
query_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
gallery_path = fusion_param['gallery_path']
gallery_lines = read_lines(gallery_path)
gallery_tracks = list()
for gallery in gallery_lines:
info = gallery.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
gallery_tracks.append([info[0], int(info[1][0]), int(info[2])])
else:
gallery_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
deltas = [[list() for j in range(6)] for i in range(6)]
for i, market_probe_track in enumerate(query_tracks):
for j in range(len(gallery_tracks)):
if query_tracks[i][0] == gallery_tracks[j][0] \
and i != j \
and query_tracks[i][1] != gallery_tracks[j][1]:
deltas[query_tracks[i][1] - 1][gallery_tracks[j][1] - 1].append(
query_tracks[i][2] - gallery_tracks[j][2]
)
for camera_delta in deltas:
for delta_s in camera_delta:
delta_s.sort()
pickle_save('true_grid-cv0_test.pck', deltas)
if __name__ == '__main__':
# probe_path = '../data/market/probe.txt'
# gallery_path = '../data/market/gallery.txt'
save_grid_train_truth()
save_grid_test_truth()
# save_market_img_list(probe_path, 'market_probe.csv')
# save_market_img_list(gallery_path, 'market_gallery.csv')
# save_market_train_truth()
# save_market_probe_truth()
# save_market_test_truth()
| 8,645 | 40.171429 | 92 |
py
|
TFusion
|
TFusion-master/TrackViz/pre_process/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/profile/fusion_param.py
|
ctrl_msg = {
'data_folder_path': 'market_market-test',
'cv_num': 0,
'ep': 0,
'en': 0
}
update_msg = {}
def get_fusion_param():
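    # Assemble every dataset / intermediate-file path from the current ctrl_msg settings;
    # entries in update_msg override the corresponding keys at the end.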
origin_dict = {
'renew_pid_path': 'data/' + ctrl_msg['data_folder_path'] + '/renew_pid.log',
'renew_ac_path': 'data/' + ctrl_msg['data_folder_path'] + '/renew_ac.log',
'predict_pid_path': 'data/' + ctrl_msg['data_folder_path'] + '/predict_pid.log',
'answer_path': 'data/' + ctrl_msg['data_folder_path'] + '/test_tracks.txt',
'probe_path': '',
'train_path': '',
'gallery_path': '',
'distribution_pickle_path': 'data/' + ctrl_msg['data_folder_path'] + '/sorted_deltas.pickle',
'src_distribution_pickle_path': 'data/' + ctrl_msg['data_folder_path'].replace('test', 'train') + '/sorted_deltas.pickle',
'persons_deltas_path': 'data/' + ctrl_msg['data_folder_path'] + '/persons_deltas_score.pickle',
'persons_ap_path': 'data/' + ctrl_msg['data_folder_path'] + '/persons_ap_scores.pickle',
'predict_person_path': 'data/' + ctrl_msg['data_folder_path'] + '/predict_persons.pickle',
'mid_score_path': 'data/' + ctrl_msg['data_folder_path'] + '/cross_mid_score.log',
'eval_fusion_path': 'data/' + ctrl_msg['data_folder_path'] + '/cross_filter_pid.log',
'fusion_normal_score_path': 'data/' + ctrl_msg['data_folder_path'] + '/cross_filter_score.log',
'ep': ctrl_msg['ep'],
'en': ctrl_msg['en']
}
if '_grid' in ctrl_msg['data_folder_path'] and '_grid_' not in ctrl_msg['data_folder_path']:
origin_dict['probe_path'] = 'data/grid/grid-cv' + str(ctrl_msg['cv_num']) + '-probe.txt'
origin_dict['train_path'] = 'data/grid/grid-cv' + str(ctrl_msg['cv_num']) + '-train.txt'
origin_dict['gallery_path'] = 'data/grid/grid-cv' + str(ctrl_msg['cv_num']) + '-gallery.txt'
elif '_market' in ctrl_msg['data_folder_path'] and '_market_' not in ctrl_msg['data_folder_path']:
origin_dict['probe_path'] = 'data/market/probe.txt'
origin_dict['train_path'] = 'data/market/train.txt'
origin_dict['gallery_path'] = 'data/market/gallery.txt'
elif '_duke' in ctrl_msg['data_folder_path']:
origin_dict['probe_path'] = 'data/duke/probe.list'
origin_dict['train_path'] = 'data/duke/train.list'
origin_dict['gallery_path'] = 'data/duke/test.list'
if 'train' in ctrl_msg['data_folder_path']:
origin_dict['answer_path'] = origin_dict['train_path']
else:
origin_dict['answer_path'] = origin_dict['probe_path']
if 'r-' in origin_dict['src_distribution_pickle_path']:
# use track info before increment
origin_dict['rand_distribution_pickle_path'] = origin_dict['src_distribution_pickle_path'].replace('r-train',
'train_rand')
else:
# use track info after increment
origin_dict['rand_distribution_pickle_path'] = origin_dict['src_distribution_pickle_path'].replace('train',
'train_rand')
origin_dict['rand_distribution_pickle_path'] = origin_dict['src_distribution_pickle_path'].replace('train',
'train_rand')
for (k, v) in update_msg.items():
origin_dict[k] = v
return origin_dict
fusion_param = get_fusion_param()
| 3,550 | 52.80303 | 130 |
py
|
TFusion
|
TFusion-master/TrackViz/profile/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/post_process/grid_summary.py
|
from util.file_helper import read_lines, write
def avg_acc(grid_eval_path):
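    # The eval log alternates header and accuracy lines; the accuracy lines cycle through
    # before-retrain vision, before-retrain fusion, after-retrain vision and after-retrain
    # fusion across the 10 cross-validation folds, hence the division by 10 below.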
grid_infos = read_lines(grid_eval_path)
before_vision_accs = [0.0, 0.0, 0.0]
before_fusion_accs = [0.0, 0.0, 0.0]
after_vision_accs = [0.0, 0.0, 0.0]
after_fusion_accs = [0.0, 0.0, 0.0]
i_cv_cnt = 0
for i, grid_info in enumerate(grid_infos):
if i % 2 != 0:
accs = grid_info.split()
if i_cv_cnt % 4 == 0:
for j in range(3):
before_vision_accs[j] += float(accs[j])
if i_cv_cnt % 4 == 1:
for j in range(3):
before_fusion_accs[j] += float(accs[j])
if i_cv_cnt % 4 == 2:
for j in range(3):
after_vision_accs[j] += float(accs[j])
if i_cv_cnt % 4 == 3:
for j in range(3):
after_fusion_accs[j] += float(accs[j])
i_cv_cnt += 1
write('grid_eval.log', '\n' + grid_eval_path + '\n')
write('grid_eval.log', 'before_retrain_vision\n%f %f %f\n' % (before_vision_accs[0]/10, before_vision_accs[1]/10, before_vision_accs[2]/10))
write('grid_eval.log', 'before_retrain_fusion\n%f %f %f\n' % (before_fusion_accs[0]/10, before_fusion_accs[1]/10, before_fusion_accs[2]/10))
write('grid_eval.log', 'after_retrain_vision\n%f %f %f\n' % (after_vision_accs[0]/10, after_vision_accs[1]/10, after_vision_accs[2]/10))
write('grid_eval.log', 'after_retrain_fusion\n%f %f %f\n' % (after_fusion_accs[0]/10, after_fusion_accs[1]/10, after_fusion_accs[2]/10))
if __name__ == '__main__':
avg_acc('market_grid.txt')
avg_acc('cuhk_grid.txt')
avg_acc('viper_grid.txt')
avg_acc('grid_grid.txt')
| 1,724 | 44.394737 | 144 |
py
|
TFusion
|
TFusion-master/TrackViz/post_process/track_prob.py
|
# coding=utf-8
from util.serialize import pickle_load
def binary_search(a, target):
    # Unlike a plain binary search, the goal is to find the best insertion index for target
low = 0
high = len(a) - 1
while low <= high:
mid = (low + high) // 2
mid_val = a[mid]
if mid_val < target:
low = mid + 1
elif mid_val > target:
high = mid - 1
else:
return mid
return low
def track_score(camera_delta_s, camera1, time1, camera2, time2, interval=100, test=True, filter_interval=1000):
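    # Estimate the spatio-temporal score of matching (camera1, time1) with (camera2, time2):
    # count how many training deltas between the two cameras fall within +/-interval of the
    # observed delta and normalise by the total number of deltas recorded for camera1.
    # Pairs further apart than filter_interval are rejected with -1.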
if abs(time1 - time2) > filter_interval:
return -1.
camera1 -= 1
camera2 -= 1
# if test and camera1 == camera2:
# return 0.0000001
cur_delta = time1 - time2
delta_distribution = camera_delta_s[camera1][camera2]
total_cnt = sum(map(len, camera_delta_s[camera1]))
# 10 second
left_bound = cur_delta - interval
right_bound = cur_delta + interval
    # Binary-search both bounds to count the spatio-temporal samples inside the tolerance window
left_index = binary_search(delta_distribution, left_bound)
right_index = binary_search(delta_distribution, right_bound)
if total_cnt == 0 or len(camera_delta_s[camera1][camera2]) == 0:
return 0.0
    # Divide by total_cnt rather than len(camera_delta_s[camera1][camera2]) so the spatial probability is reflected as well
score = (right_index - left_index) / float(total_cnt)
    # Same-camera pairs dominate the training set but are excluded at evaluation time, so folding in the spatial probability this way is costly
# score = (right_index - left_index + 1) / float(len(camera_delta_s[camera1][camera2]))
if len(delta_distribution) == 0:
return 0.0
# score = (right_index - left_index + 1) / float(len(camera_delta_s[camera1][2]))
# if score > 0:
# print(len(delta_distribution))
# print('delta range %d ~ %d' % (delta_distribution[0], delta_distribution[-1]))
# print(left_index)
# print(right_index)
# print('probablity: %f%%' % (score * 100))
return score
def track_interval_score(interval_score_s, camera1, time1, camera2, time2):
delta = time2 - time1
for i, camera_pair_travel_prob in enumerate(interval_score_s[camera1 - 1][camera2 - 1]):
if camera_pair_travel_prob['left'] < delta < camera_pair_travel_prob['right']:
print('camera1: %d, camera2: %d, delta: %d, interval: %d, prob: %f' % (
camera1, camera2, delta, i, camera_pair_travel_prob['prob']))
return camera_pair_travel_prob['prob']
return 0
if __name__ == '__main__':
camera_delta_s = pickle_load('data/top10/sorted_deltas.pickle')
track_score(camera_delta_s, 1, 25, 2, 250)
| 2,487 | 36.134328 | 111 |
py
|
TFusion
|
TFusion-master/TrackViz/post_process/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/util/str_helper.py
|
def folder(path):
final_slash_idx = -1
for i, c in enumerate(path):
if c == '/':
final_slash_idx = i
if final_slash_idx == -1:
return path
else:
return path[: final_slash_idx]
if __name__ == '__main__':
print(folder('data/top10/test.txt'))
| 298 | 20.357143 | 40 |
py
|
TFusion
|
TFusion-master/TrackViz/util/file_helper.py
|
import os
def write_line(path, content):
with open(path, "a+") as dst_file:
dst_file.write(content + '\n')
def write(path, content):
with open(path, "a+") as dst_file:
dst_file.write(content)
def read_lines(path):
with open(path) as f:
content = list()
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
f.close()
continue
if not lines:
break
for line in lines:
content.append(line)
return content
def read_lines_and(path, on_line):
with open(path) as f:
content = list()
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
f.close()
continue
if not lines:
break
for line in lines:
on_line(line)
return content
def read_lines_idx_and(path, on_line):
line_idx = 0
with open(path) as f:
content = list()
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
f.close()
continue
if not lines:
break
for line in lines:
on_line(line, line_idx)
line_idx += 1
return content
def safe_remove(path):
if os.path.exists(path):
os.remove(path)
return True
else:
return False
def safe_mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
| 1,627 | 20.706667 | 40 |
py
|
TFusion
|
TFusion-master/TrackViz/util/viz.py
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def draw_line(y_s, x_s, y_label, x_label, y_titles, title, line_color=None):
# plt.subplots()
plt.subplots(figsize=(6, 5))
sns.set(font_scale=2.4)
line_styles = ['--', '-']
for i in range(len(y_s)):
plt.plot(x_s, y_s[i], color=line_color, label=y_titles[i], linestyle=line_styles[i], linewidth=5.)
plt.xlabel(x_label, fontsize=32)
plt.ylabel(y_label, fontsize=32)
plt.ylim(min(min(y_s[0]), min(y_s[1]))*0.8, max(max(y_s[0]), max(y_s[1]))*1.2)
plt.xlim(min(x_s), max(x_s))
plt.yticks(fontsize=32)
plt.xticks(fontsize=32)
plt.legend()
plt.title(title)
plt.show()
if __name__ == '__main__':
accs = np.genfromtxt('../increment_acc.txt', delimiter='\t')
draw_line(accs, np.arange(1, 11), 'Rank1_acc', 'iteration times',['vision', 'fusion'], title='')
# plt.subplots()
# plt.plot(np.arange(1, 11), accs[0], label='vision')
# plt.legend()
# plt.plot(np.arange(1, 11), accs[0], label='fusion')
# plt.legend()
# plt.xlabel('Rank1_acc')
# plt.ylabel('iteration times')
# plt.ylim(0.2, 0.4)
# plt.title('')
# plt.show()
| 1,198 | 32.305556 | 106 |
py
|
TFusion
|
TFusion-master/TrackViz/util/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/util/serialize.py
|
import os
import pickle
import random
def random6():
return random.randint(100000, 999999)
def pickle_save(path, obj):
try:
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', path, e)
return False
return True
def pickle_load(path):
if os.path.exists(path):
return pickle.load(open(path, 'rb'))
return None
if __name__ == '__main__':
pass
| 493 | 18 | 56 |
py
|
TFusion
|
TFusion-master/TrackViz/train/st_estim.py
|
#coding=utf-8
from random import randint
import shutil
from profile.fusion_param import ctrl_msg
from util.file_helper import read_lines, safe_remove, safe_mkdir
from util.serialize import pickle_save
from util.str_helper import folder
def prepare_rand_folder(fusion_param):
rand_predict_path = fusion_param['renew_pid_path'].replace(ctrl_msg['data_folder_path'],
ctrl_msg['data_folder_path'] + '_rand')
rand_folder_path = folder(rand_predict_path)
safe_mkdir(rand_folder_path)
# although copy all info including pid info, but not use in later training
shutil.copy(fusion_param['renew_pid_path'], rand_predict_path)
def prepare_diff_folder(fusion_param):
diff_predict_path = fusion_param['renew_pid_path'].replace(ctrl_msg['data_folder_path'],
ctrl_msg['data_folder_path'] + '_diff')
diff_folder_path = folder(diff_predict_path)
safe_mkdir(diff_folder_path)
# although copy all info including pid info, but not use in later training
shutil.copy(fusion_param['renew_pid_path'], diff_predict_path)
def get_predict_delta_tracks(fusion_param, useful_predict_limit=10, random=False, diff_person=False, use_real_st=False):
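    # Build the (noisy) spatio-temporal delta distribution: for each probe track, take its
    # top-ranked visual matches, keep cross-camera pairs within the same sequence, and record
    # the frame-time deltas per camera pair. With random=True the matches are replaced by
    # random person ids, giving a background/random distribution.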
    # Build the probe (left-image) track list
answer_path = fusion_param['answer_path']
answer_lines = read_lines(answer_path)
camera_cnt = 6
real_tracks = list()
for answer in answer_lines:
info = answer.split('_')
if 'bmp' in info[2]:
#
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
# grid
real_tracks.append([info[0], int(info[1][0]), int(info[2]), 1])
elif 'f' in info[2]:
real_tracks.append([info[0], int(info[1][1]), int(info[2][1:-5]), 1])
camera_cnt = 8
else:
# market
real_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
    print('left image ready')
    # Load the predicted (right-image) candidate list
renew_pid_path = fusion_param['renew_pid_path']
predict_lines = read_lines(renew_pid_path)
    print('predict images ready')
    # A person from the left list may appear under any of the cameras in the right list
camera_delta_s = [[list() for j in range(camera_cnt)] for i in range(camera_cnt)]
person_cnt = len(answer_lines)
    # Market-1501 has six sequences; a delta is only computed within the same sequence
if random:
useful_predict_limit = max(len(predict_lines)/100, 10)
for i, line in enumerate(predict_lines):
predict_pids = line.split(' ')
useful_cnt = 0
for j, predict_pid in enumerate(predict_pids):
if useful_cnt > useful_predict_limit:
break
if random:
predict_pid = randint(0, person_cnt - 1)
elif diff_person:
predict_pid = randint(10, person_cnt - 1)
else:
# todo transfer: if predict by python, start from 0, needn't minus 1
predict_pid = int(predict_pid)
predict_pid = int(predict_pid)
# same seq
# todo ignore same camera track
if real_tracks[i][3] == real_tracks[predict_pid][3] and real_tracks[i][1] != real_tracks[predict_pid][1]:
# and pid equal: real st
# if use_real_st and random or real_tracks[i][0] == real_tracks[predict_pid][0]:
if True:
useful_cnt += 1
delta = real_tracks[i][2] - real_tracks[predict_pid][2]
if abs(delta) < 1000000:
camera_delta_s[real_tracks[i][1] - 1][real_tracks[predict_pid][1] - 1].append(delta)
print 'deltas collected'
for camera_delta in camera_delta_s:
for delta_s in camera_delta:
delta_s.sort()
print 'deltas sorted'
# for python
safe_remove(fusion_param['distribution_pickle_path'])
pickle_save(fusion_param['distribution_pickle_path'], camera_delta_s)
print 'deltas saved'
return camera_delta_s
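# Indexing sketch for the model built above (illustrative only): the entry
# camera_delta_s[c1 - 1][c2 - 1] holds the sorted frame-time deltas observed
# between a probe seen by camera c1 and its ranked matches seen by camera c2.
def _deltas_between_cameras_demo(camera_delta_s, c1, c2):
    return camera_delta_s[c1 - 1][c2 - 1]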
| 3,995 | 41.063158 | 120 |
py
|
TFusion
|
TFusion-master/TrackViz/train/st_filter.py
|
# coding=utf-8
from post_process.track_prob import track_score
from profile.fusion_param import get_fusion_param, ctrl_msg
from util.file_helper import read_lines, read_lines_and, write, safe_remove
from util.serialize import pickle_load
import numpy as np
import os
def smooth_score(c1, c2, time1, time2, camera_delta_s):
track_interval = 20
smooth_window_size = 10
smooth_scores = [
track_score(camera_delta_s, c1,
time1 - (smooth_window_size / 2 - 1) * track_interval + j * track_interval, c2, time2,
interval=track_interval)
for j in range(smooth_window_size)]
# filter
for j in range(smooth_window_size):
if smooth_scores[j] < 0.01:
smooth_scores[j] = 0
# smooth
score = sum(smooth_scores) / len(smooth_scores)
return score
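# Offset sketch for the smoothing above (same constants, illustrative only): with
# track_interval=20 and smooth_window_size=10 the probe time is shifted by
# -80, -60, ..., +100 and the ten resulting scores are averaged.
def _smooth_window_offsets_demo(track_interval=20, smooth_window_size=10):
    return [j * track_interval - (smooth_window_size / 2 - 1) * track_interval
            for j in range(smooth_window_size)]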
def predict_track_scores(real_tracks, camera_delta_s, fusion_param, smooth=False):
# fusion_param = get_fusion_param()
# persons_deltas_score = pickle_load(fusion_param['persons_deltas_path'])
# if pickle_load(fusion_param['persons_deltas_path']) is not None:
# return persons_deltas_score
predict_path = fusion_param['renew_pid_path']
# test_tracks.txt
top_cnt = 10
persons_deltas_score = list()
    # todo: avoid reading this file a second time
pids4probes = np.genfromtxt(predict_path, delimiter=' ')
for probe_i, pids4probe in enumerate(pids4probes):
person_deltas_score = list()
for pid4probe in pids4probe:
# todo transfer: if predict by python, start from 0, needn't minus 1
pid4probe = int(pid4probe)
# predict_idx = predict_idx - 1
if len(real_tracks[pid4probe]) > 3:
s1 = real_tracks[pid4probe][3]
s2 = real_tracks[probe_i][3]
if s1 != s2:
person_deltas_score.append(-1.0)
continue
time1 = real_tracks[pid4probe][2]
# if track_score_idx == 3914:
# print 'test'
time2 = real_tracks[probe_i][2]
c1 = real_tracks[pid4probe][1]
c2 = real_tracks[probe_i][1]
if smooth:
score = smooth_score(c1, c2, time1, time2, camera_delta_s)
else:
                # given the two cameras and times, look up the spatio-temporal score;
                # if camera_deltas was built from random pairs this yields the random score
                # todo: the interval sizes need to be adjusted for grid
score = track_score(camera_delta_s, c1, time1, c2, time2, interval=700, filter_interval=40000)
person_deltas_score.append(score)
probe_i += 1
persons_deltas_score.append(person_deltas_score)
return persons_deltas_score
def predict_img_scores(fusion_param):
# fusion_param = get_fusion_param()
# final_persons_scores = pickle_load(fusion_param['persons_ap_path'])
# if pickle_load(fusion_param['persons_ap_path']) is not None:
# return final_persons_scores
predict_score_path = fusion_param['renew_ac_path']
vision_persons_scores = np.genfromtxt(predict_score_path, delimiter=' ').astype(float)
# pickle_save(fusion_param['persons_ap_path'], final_persons_scores)
return vision_persons_scores
def predict_pids(fusion_param):
# fusion_param = get_fusion_param()
# predict_persons = pickle_load(fusion_param['predict_person_path'])
# if pickle_load(fusion_param['predict_person_path']) is not None:
# return predict_persons
predict_person_path = fusion_param['renew_pid_path']
predict_persons = np.genfromtxt(predict_person_path, delimiter=' ').astype(int)
# pickle_save(fusion_param['predict_person_path'], predict_persons)
return predict_persons
def get_person_pids(predict_path):
predict_person_path = predict_path
predict_persons = np.genfromtxt(predict_person_path, delimiter=' ').astype(int)
return predict_persons
def train_tracks(fusion_param):
answer_path = fusion_param['answer_path']
answer_lines = read_lines(answer_path)
    # probe (left) images
real_tracks = list()
for answer in answer_lines:
info = answer.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
real_tracks.append([info[0], int(info[1][0]), int(info[2])])
elif 'f' in info[2]:
real_tracks.append([info[0], int(info[1][1]), int(info[2][1:-5]), 1])
else:
real_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
return real_tracks
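# Parsing sketch for the Market-1501 style branch above, using a made-up file
# name '0002_c1s1_000451_03.jpg': info[0] is the person id, info[1][1] the
# camera, info[2] the frame index and info[1][3] the sequence id.
def _parse_market_name_demo(image_name='0002_c1s1_000451_03.jpg'):
    info = image_name.split('_')
    return [info[0], int(info[1][1]), int(info[2]), int(info[1][3])]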
def fusion_st_img_ranker(fusion_param):
ep = fusion_param['ep']
en = fusion_param['en']
    # read the predicted person ids and visual scores from renew_pid and renew_ac
persons_ap_scores = predict_img_scores(fusion_param)
persons_ap_pids = predict_pids(fusion_param)
    # load the spatio-temporal model built earlier, plus its random and cross-person counterparts, from disk
camera_delta_s = pickle_load(fusion_param['distribution_pickle_path'])
rand_delta_s = pickle_load(fusion_param['rand_distribution_pickle_path'])
diff_delta_s = pickle_load(fusion_param['rand_distribution_pickle_path'].replace('rand', 'diff'))
real_tracks = train_tracks(fusion_param)
    # compute the spatio-temporal scores and the random / cross-person baseline scores
persons_track_scores = predict_track_scores(real_tracks, camera_delta_s, fusion_param)
rand_track_scores = predict_track_scores(real_tracks, rand_delta_s, fusion_param)
diff_track_scores = predict_track_scores(real_tracks, diff_delta_s, fusion_param)
persons_cross_scores = list()
log_path = fusion_param['eval_fusion_path']
map_score_path = fusion_param['fusion_normal_score_path']
safe_remove(map_score_path)
safe_remove(log_path)
line_log_cnt = 10
for i, person_ap_pids in enumerate(persons_ap_pids):
cross_scores = list()
for j, person_ap_pid in enumerate(person_ap_pids):
if rand_track_scores[i][j] < 0.00002:
cross_score = (persons_track_scores[i][j]*(1-ep) - en*diff_track_scores[i][j]) * (persons_ap_scores[i][j]+ep/(1-ep-en)) / 0.00002
else:
cross_score = (persons_track_scores[i][j] * (1 - ep) - en * diff_track_scores[i][j]) * (
persons_ap_scores[i][j] + ep / (1 - ep - en)) / rand_track_scores[i][j]
cross_scores.append(cross_score)
persons_cross_scores.append(cross_scores)
print 'img score ready'
max_score = max([max(predict_cross_scores) for predict_cross_scores in persons_cross_scores])
for i, person_cross_scores in enumerate(persons_cross_scores):
for j, person_cross_score in enumerate(person_cross_scores):
if person_cross_score > 0:
# diff seq not sort and not normalize
persons_cross_scores[i][j] /= max_score
else:
persons_cross_scores[i][j] *= -0.00002
# if real_tracks[i][1] == real_tracks[persons_ap_pids[i][j]][1]:
# # print '%d, %d' % (i, j)
# persons_cross_scores[i][j] = 0
person_score_idx_s = list()
top1_scores = list()
print 'above person score ready'
for i, person_cross_scores in enumerate(persons_cross_scores):
        # sort each probe's predictions by score; the resulting indices are used to reorder the pids
sort_score_idx_s = sorted(range(len(person_cross_scores)), key=lambda k: -person_cross_scores[k])
person_score_idx_s.append(sort_score_idx_s)
        # collect the top-1 score distribution; the mid score is computed from it below
top1_scores.append(person_cross_scores[sort_score_idx_s[0]])
    # sort in descending order and take the score at the 50% position
sorted_top1_scores = sorted(top1_scores, reverse=True)
mid_score = sorted_top1_scores[int(len(sorted_top1_scores) * 0.5)]
mid_score_path = fusion_param['mid_score_path']
safe_remove(mid_score_path)
write(mid_score_path, '%f\n' % mid_score)
print(str(mid_score))
sorted_persons_ap_pids = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
sorted_persons_ap_scores = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
for i, person_ap_pids in enumerate(persons_ap_pids):
for j in range(len(person_ap_pids)):
sorted_persons_ap_pids[i][j] = persons_ap_pids[i][person_score_idx_s[i][j]]
sorted_persons_ap_scores[i][j] = persons_cross_scores[i][person_score_idx_s[i][j]]
np.savetxt(log_path, sorted_persons_ap_pids, fmt='%d')
np.savetxt(map_score_path, sorted_persons_ap_scores, fmt='%f')
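# Pure-function sketch of the fusion score computed above (and reused by
# fusion_st_gallery_ranker below): p_st / p_rand / p_diff are the real, random
# and cross-person spatio-temporal scores, p_vis is the visual score, and
# ep / en are the estimated positive / negative shot rates.
def _fusion_score_demo(p_st, p_rand, p_diff, p_vis, ep, en):
    if p_rand < 0.00002:
        p_rand = 0.00002  # same floor as in the loops above
    return (p_st * (1 - ep) - en * p_diff) * (p_vis + ep / (1 - ep - en)) / p_rand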
def gallery_track_scores(query_tracks, gallery_tracks, camera_delta_s, fusion_param, smooth=False):
predict_path = fusion_param['renew_pid_path']
persons_deltas_score = list()
pids4probes = np.genfromtxt(predict_path, delimiter=' ')
for probe_i, pids4probe in enumerate(pids4probes):
person_deltas_score = list()
for i, pid4probe in enumerate(pids4probe):
# if i >= top_cnt:
# break
pid4probe = int(pid4probe)
if len(query_tracks[0]) > 3:
# market index minus 1
probe_i_tmp = probe_i # (probe_i + 1) % len(pids4probes)
else:
probe_i_tmp = probe_i
# todo transfer: if predict by python, start from 0, needn't minus 1
# predict_idx = predict_idx - 1
if len(query_tracks[probe_i_tmp]) > 3:
s1 = query_tracks[probe_i_tmp][3]
# print predict_idx
s2 = gallery_tracks[pid4probe][3]
if s1 != s2:
person_deltas_score.append(-1.0)
continue
time1 = query_tracks[probe_i_tmp][2]
# if track_score_idx == 3914:
# print 'test'
time2 = gallery_tracks[pid4probe][2]
c1 = query_tracks[probe_i_tmp][1]
c2 = gallery_tracks[pid4probe][1]
if smooth:
score = smooth_score(c1, c2, time1, time2, camera_delta_s)
else:
                # given the two cameras and times, look up the spatio-temporal score;
                # if camera_deltas was built from random pairs this yields the random score
if 'market_market' in predict_path:
score = track_score(camera_delta_s, c1, time1, c2, time2, interval=100, filter_interval=500)
elif '_market' in predict_path:
score = track_score(camera_delta_s, c1, time1, c2, time2, interval=700, filter_interval=40000)
elif '_duke' in predict_path:
score = track_score(camera_delta_s, c1, time1, c2, time2, interval=700, filter_interval=50000)
else:
score = track_score(camera_delta_s, c1, time1, c2, time2)
person_deltas_score.append(score)
probe_i += 1
persons_deltas_score.append(person_deltas_score)
return persons_deltas_score
def fusion_st_gallery_ranker(fusion_param):
ep = fusion_param['ep']
en = fusion_param['en']
log_path = fusion_param['eval_fusion_path']
map_score_path = fusion_param['fusion_normal_score_path'] # fusion_param = get_fusion_param()
# answer path is probe path
answer_path = fusion_param['answer_path']
answer_lines = read_lines(answer_path)
query_tracks = list()
for answer in answer_lines:
info = answer.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
query_tracks.append([info[0], int(info[1][0]), int(info[2])])
elif 'f' in info[2]:
query_tracks.append([info[0], int(info[1][1]), int(info[2][1:-5]), 1])
else:
query_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
gallery_path = fusion_param['gallery_path']
gallery_lines = read_lines(gallery_path)
gallery_tracks = list()
for gallery in gallery_lines:
info = gallery.split('_')
if 'bmp' in info[2]:
info[2] = info[2].split('.')[0]
if len(info) > 4 and 'jpe' in info[6]:
gallery_tracks.append([info[0], int(info[1][0]), int(info[2])])
elif 'f' in info[2]:
gallery_tracks.append([info[0], int(info[1][1]), int(info[2][1:-5]), 1])
else:
gallery_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
print 'probe and gallery tracks ready'
persons_ap_scores = predict_img_scores(fusion_param)
persons_ap_pids = predict_pids(fusion_param)
print 'read vision scores and pids ready'
if 'market_market' in log_path:
for i, person_ap_scores in enumerate(persons_ap_scores):
cur_max_vision = 0
for j, person_ap_score in enumerate(person_ap_scores):
if query_tracks[i][1] != gallery_tracks[persons_ap_pids[i][j]][1]:
# diff vision
cur_max_vision = person_ap_score
persons_ap_scores[i] /= cur_max_vision
camera_delta_s = pickle_load(fusion_param['distribution_pickle_path'])
# camera_delta_s = pickle_load('true_market_probe.pck')
print 'load track deltas ready'
rand_delta_s = pickle_load(fusion_param['rand_distribution_pickle_path'])
print 'load rand deltas ready'
diff_delta_s = pickle_load(fusion_param['rand_distribution_pickle_path'].replace('rand', 'diff'))
print 'load diff deltas ready'
# todo tmp diff deltas
# diff_delta_s = rand_delta_s
rand_track_scores = gallery_track_scores(query_tracks, gallery_tracks, rand_delta_s, fusion_param)
print 'rand scores ready'
persons_track_scores = gallery_track_scores(query_tracks, gallery_tracks, camera_delta_s, fusion_param)
print 'track scores ready'
diff_track_scores = gallery_track_scores(query_tracks, gallery_tracks, diff_delta_s, fusion_param)
print 'diff track score ready'
# todo tmp diff scores
# diff_track_scores = rand_track_scores
persons_cross_scores = list()
safe_remove(map_score_path)
safe_remove(log_path)
# fusion_track_scores = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
# for i, person_ap_pids in enumerate(persons_ap_pids):
# for j, person_ap_pid in enumerate(person_ap_pids):
# cur_track_score = persons_track_scores[i][j]
# rand_track_score = rand_track_scores[i][j]
# if rand_track_score < 0.00002:
# rand_track_score = 0.00002
# fusion_track_scores[i][j] = (cur_track_score * (1 - ep) - en * diff_track_scores[i][j]) / rand_track_score
# for i, person_ap_pids in enumerate(persons_ap_pids):
# cur_max_predict = max(persons_track_scores[i])
# cur_max_rand = max(rand_track_scores[i])
# for j in range(len(fusion_track_scores[i])):
# if fusion_track_scores[i][j] >= 0:
# fusion_track_scores[i][j] /= cur_max_predict/cur_max_rand
# else:
# fusion_track_scores[i][j] = 1.
for i, person_ap_pids in enumerate(persons_ap_pids):
cross_scores = list()
for j, person_ap_pid in enumerate(person_ap_pids):
cur_track_score = persons_track_scores[i][j]
rand_track_score = rand_track_scores[i][j]
if rand_track_score < 0:
rand_track_score = 0.00002
elif rand_track_score < 0.00002:
rand_track_score = 0.00002
                if cur_track_score != 0:
                    cur_track_score = -1
cross_score = (cur_track_score * (1 - ep) - en * diff_track_scores[i][j]) * (
persons_ap_scores[i][j] + ep / (1 - ep - en)) / rand_track_score
            if i == 0 and j % 100 == 0:
print '%f %f %f %f %f' % (cur_track_score, diff_track_scores[i][j], persons_ap_scores[i][j], rand_track_score, cross_score)
if cur_track_score > 0 and cross_score < 0:
cross_score = 0
cross_scores.append(cross_score)
if max(cross_scores) == 0:
print i
persons_cross_scores.append(cross_scores)
print 'fusion scores ready'
# pickle_save(ctrl_msg['data_folder_path']+'viper_r-testpersons_cross_scores.pick', persons_cross_scores)
# pickle_save(ctrl_msg['data_folder_path']+'viper_r-testpersons_ap_pids.pick', persons_ap_pids)
max_score_s = [max(predict_cross_scores) for predict_cross_scores in persons_cross_scores]
for i, person_cross_scores in enumerate(persons_cross_scores):
for j, person_cross_score in enumerate(person_cross_scores):
if persons_cross_scores[i][j] >= 0:
# diff seq not sort, not rank for max, and not normalize
if max_score_s[i] == 0:
# there exist probe track with same seq, diff camera but value > 1000
print i
else:
persons_cross_scores[i][j] /= max_score_s[i]
# persons_cross_scores[i][j] /= max_score
# if persons_cross_scores[i][j] > 0.5:
# print 'same'
# print persons_cross_scores[i][j]
else:
# so diff seq is negative, normalize by minimum
# persons_cross_scores[i][j] /= min_score_s[i]
# persons_cross_scores[i][j] *= 1.0
persons_cross_scores[i][j] *= -0.00002
print 'fusion scores normalized, diff seq use vision score to rank'
person_score_idx_s = list()
for i, person_cross_scores in enumerate(persons_cross_scores):
        # sort each probe's predictions by score; the resulting indices are used to reorder the pids
sort_score_idx_s = sorted(range(len(person_cross_scores)), key=lambda k: -person_cross_scores[k])
person_score_idx_s.append(sort_score_idx_s)
sorted_persons_ap_pids = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
sorted_persons_ap_scores = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
for i, person_ap_pids in enumerate(persons_ap_pids):
for j in range(len(person_ap_pids)):
sorted_persons_ap_pids[i][j] = persons_ap_pids[i][person_score_idx_s[i][j]]
sorted_persons_ap_scores[i][j] = persons_cross_scores[i][person_score_idx_s[i][j]]
print 'sorted scores ready'
np.savetxt(log_path, sorted_persons_ap_pids, fmt='%d')
np.savetxt(map_score_path, sorted_persons_ap_scores, fmt='%f')
print 'save sorted fusion scores'
# for i, person_ap_pids in enumerate(persons_ap_pids):
# for j in range(len(person_ap_pids)):
# write(log_path, '%d ' % person_ap_pids[person_score_idx_s[i][j]])
# write(map_score_path, '%.3f ' % persons_cross_scores[i][person_score_idx_s[i][j]])
# write(log_path, '\n')
# write(map_score_path, '\n')
return person_score_idx_s
if __name__ == '__main__':
ctrl_msg['data_folder_path'] = 'cuhk_duke-r-test'
ctrl_msg['ep'] = 0.0
ctrl_msg['en'] = 0.0
# fusion_param = get_fusion_param()
# fusion_st_img_ranker(fusion_param, fusion_param['pos_shot_rate'], fusion_param['neg_shot_rate'])
# eval_on_train_test(fusion_param, test_mode=True)
fusion_param = get_fusion_param()
fusion_st_gallery_ranker(fusion_param)
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
# os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
# + 'market /home/cwh/coding/TrackViz/' + fusion_param['eval_fusion_path'])
os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
+ 'duke /home/cwh/coding/TrackViz/' + fusion_param['eval_fusion_path'])
# fusion_st_img_ranker(fusion_param)
# delta_range, over_probs = fusion_curve(fusion_param)
# viz_fusion_curve(delta_range, [over_probs])
# pt = fusion_heat(fusion_param)
# viz_heat_map(pt)
| 19,506 | 45.556086 | 145 |
py
|
TFusion
|
TFusion-master/TrackViz/train/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/ctrl/transfer.py
|
import os
from ctrl.img_st_fusion import init_strict_img_st_fusion
from profile.fusion_param import ctrl_msg, get_fusion_param
from util.file_helper import safe_mkdir
def fusion_dir_prepare(source, target):
fusion_data_path = '/home/cwh/coding/TrackViz/data/'
fusion_train_dir = fusion_data_path + '/' + source + '_' + target + '-train'
fusion_test_dir = fusion_data_path + '/' + source + '_' + target + '-test'
safe_mkdir(fusion_train_dir)
safe_mkdir(fusion_test_dir)
return fusion_train_dir, fusion_test_dir
def vision_rank(source, target):
    # the vision classifier predicts the similarity rank table
fusion_train_dir, fusion_test_dir = fusion_dir_prepare(source, target)
vision_train_rank_pids_path = fusion_train_dir + '/renew_pid.log'
vision_train_rank_scores_path = fusion_train_dir + '/renew_ac.log'
vision_test_rank_pids_path = fusion_test_dir + '/renew_pid.log'
vision_test_rank_scores_path = fusion_test_dir + '/renew_ac.log'
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 0 '
+ source + ' ' + target + ' '
+ vision_train_rank_pids_path + ' '
+ vision_train_rank_scores_path + ' '
+ vision_test_rank_pids_path + ' '
+ vision_test_rank_scores_path)
return vision_train_rank_pids_path, vision_train_rank_scores_path, vision_test_rank_pids_path, vision_test_rank_scores_path
def dataset_eval(source, target, rank_pids_path):
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
+ target + ' ' + rank_pids_path)
def st_fusion(source, target):
ctrl_msg['data_folder_path'] = source + '_' + target + '-train'
init_strict_img_st_fusion()
ctrl_msg['data_folder_path'] = source + '_' + target + '-train'
fusion_data_path = '/home/cwh/coding/TrackViz/'
fusion_param = get_fusion_param()
fusion_train_rank_pids_path = fusion_data_path + fusion_param['eval_fusion_path']
fusion_train_rank_scores_path = fusion_data_path + fusion_param['fusion_normal_score_path']
ctrl_msg['data_folder_path'] = source + '_' + target + '-test'
fusion_param = get_fusion_param()
fusion_test_rank_pids_path = fusion_data_path + fusion_param['eval_fusion_path']
fusion_test_rank_scores_path = fusion_data_path + fusion_param['fusion_normal_score_path']
return fusion_train_rank_pids_path, fusion_train_rank_scores_path, fusion_test_rank_pids_path, fusion_test_rank_scores_path
def rank_transfer(source, target, fusion_train_rank_pids_path, fusion_train_rank_scores_path):
fusion_train_dir, fusion_test_dir = fusion_dir_prepare(source, target + '-r')
transfer_train_rank_pids_path = fusion_train_dir + '/renew_pid.log'
transfer_train_rank_scores_path = fusion_train_dir + '/renew_ac.log'
transfer_test_rank_pids_path = fusion_test_dir + '/renew_pid.log'
transfer_test_rank_scores_path = fusion_test_dir + '/renew_ac.log'
if 'grid' in target:
target_train_list = '/home/cwh/coding/TrackViz/data/grid/' + target + '-train.txt'
elif target == 'markets1':
target_train_list = '/home/cwh/coding/TrackViz/data/markets1/train.txt'
elif target == 'market':
target_train_list = '/home/cwh/coding/TrackViz/data/market/train.txt'
elif target == 'duke':
target_train_list = '/home/cwh/coding/TrackViz/data/duke/train.list'
else:
target_train_list = 'error_target_dataset'
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 1 '
+ source + ' ' + target + ' '
+ fusion_train_rank_pids_path + ' '
+ fusion_train_rank_scores_path + ' '
+ transfer_train_rank_pids_path + ' '
+ transfer_train_rank_scores_path + ' '
+ transfer_test_rank_pids_path + ' '
+ transfer_test_rank_scores_path + ' '
+ target_train_list)
return transfer_train_rank_pids_path, transfer_train_rank_scores_path, transfer_test_rank_pids_path, transfer_test_rank_scores_path
def fusion_transfer(source, target):
# vision rank and eval
vision_train_rank_pids_path, vision_train_rank_scores_path, \
vision_test_rank_pids_path, vision_test_rank_scores_path \
= vision_rank(source, target)
# fusion rank and eval
fusion_train_rank_pids_path, fusion_train_rank_scores_path, \
fusion_test_rank_pids_path, fusion_test_rank_scores_path = st_fusion(source, target)
dataset_eval(source, target, fusion_test_rank_pids_path)
iteration_cnt = 1
for i in range(iteration_cnt):
# rank transfer, rank and eval
transfer_train_rank_pids_path, transfer_train_rank_scores_path, \
transfer_test_rank_pids_path, transfer_test_rank_scores_path \
= rank_transfer(source, target, fusion_train_rank_pids_path, fusion_train_rank_scores_path)
transfer_target = target + '-r'
# fusion rank and eval
fusion_train_rank_pids_path, fusion_train_rank_scores_path, \
fusion_test_rank_pids_path, fusion_test_rank_scores_path \
= st_fusion(source, transfer_target)
dataset_eval(source, transfer_target, fusion_test_rank_pids_path)
def dataset_fusion_transfer():
sources = ['market', 'cuhk', 'viper', 'grid']
targets = ['grid','market']
for target in targets:
for source in sources:
if 'grid' in target:
for i in range(0, 10):
if 'grid' in source:
fusion_transfer('grid-cv-%d' % i, 'grid-cv%d' % i)
else:
fusion_transfer(source, 'grid-cv%d' % i)
else:
fusion_transfer(source, target)
if __name__ == '__main__':
dataset_fusion_transfer()
| 6,036 | 46.535433 | 135 |
py
|
TFusion
|
TFusion-master/TrackViz/ctrl/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/TrackViz/ctrl/img_st_fusion.py
|
#coding=utf-8
import shutil
import os
from profile.fusion_param import get_fusion_param, ctrl_msg
from train.st_estim import get_predict_delta_tracks, prepare_rand_folder, prepare_diff_folder
from train.st_filter import fusion_st_img_ranker, fusion_st_gallery_ranker
# needs to be run from the src directory
from util.file_helper import safe_remove, safe_mkdir
def test_fusion(fusion_param, ep=0.5, en=0.01):
# copy sort pickle
safe_remove(fusion_param['distribution_pickle_path'])
try:
        # directly reuse the spatio-temporal model built on the training set
shutil.copy(fusion_param['src_distribution_pickle_path'], fusion_param['distribution_pickle_path'])
print 'copy train track distribute pickle done'
except shutil.Error:
print 'pickle ready'
# merge visual probability and track distribution probability
fusion_st_gallery_ranker(fusion_param)
# evaluate
# todo transfer: no eval by fusion code
# eval_on_train_test(fusion_param, test_mode=True)
def train_fusion(fusion_param, ep=0.5, en=0.01):
    # no need to rebuild the spatio-temporal model here
# get_predict_tracks(fusion_param)
# get distribution sorted list for probability compute
# store_sorted_deltas(fusion_param)
fusion_st_img_ranker(fusion_param)
# evaluate
# todo transfer: no eval by fusion code
# eval_on_train_test(fusion_param)
def init_strict_img_st_fusion():
    # global entry point: runs fusion and scoring on both the training and the test set
fusion_param = get_fusion_param()
safe_mkdir('data/' + ctrl_msg['data_folder_path'])
get_predict_delta_tracks(fusion_param)
# # only get rand model for train dataset
prepare_rand_folder(fusion_param)
prepare_diff_folder(fusion_param)
ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'] + '_rand'
fusion_param = get_fusion_param()
    # build the spatio-temporal model from randomly chosen spatio-temporal points
get_predict_delta_tracks(fusion_param, random=True)
ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'].replace('rand', 'diff')
fusion_param = get_fusion_param()
get_predict_delta_tracks(fusion_param, diff_person=True)
    # switch back to the non-random train folder
ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'][:-5]
# has prepared more accurate ep, en
print('fusion on training dataset')
iter_strict_img_st_fusion(on_test=False)
    # switch to the test folder
print('fusion on test dataset')
ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'][:-4] + 'est'
safe_mkdir('data/' + ctrl_msg['data_folder_path'])
iter_strict_img_st_fusion(on_test=True)
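# String sketch of the folder switching used above (folder name is illustrative):
# appending '_rand' / '_diff' selects the random and cross-person models, and
# name[:-4] + 'est' turns an '...-train' folder into its '...-test' counterpart.
def _folder_names_demo(train_folder='market_grid-cv0-train'):
    rand_folder = train_folder + '_rand'
    diff_folder = train_folder + '_diff'
    test_folder = train_folder[:-4] + 'est'
    return rand_folder, diff_folder, test_folder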
# def init_strict_img_st_fusion():
# # global entry point: runs fusion and scoring on both the training and the test set
# fusion_param = get_fusion_param()
# print('init predict tracks into different class files')
# # pick predict tracks into different class file
# get_predict_tracks(fusion_param)
# # get distribution sorted list for probability compute
# store_sorted_deltas(fusion_param)
#
# # # only get rand model for train dataset
# print('generate random predict')
# write_rand_pid(fusion_param)
# ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'] + '_rand'
# fusion_param = get_fusion_param()
# # build the spatio-temporal model from randomly chosen spatio-temporal points
# gen_rand_st_model(fusion_param)
#
# # switch back to the non-random train folder
# ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'][:-5]
#
# # has prepared more accurate ep, en
# print('fusion on training dataset')
# iter_strict_img_st_fusion(on_test=False)
# # switch to the test folder
# print('fusion on test dataset')
# ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'][:-4] + 'est'
# iter_strict_img_st_fusion(on_test=True)
def iter_strict_img_st_fusion(on_test=False):
"""
    called after the image classifier has been updated; trains again with the new vision scores and ep/en
    :param on_test: if True, run fusion on the test split; otherwise on the training split
:return:
"""
fusion_param = get_fusion_param()
# ep, en = get_shot_rate()
if on_test:
test_fusion(fusion_param)
else:
train_fusion(fusion_param)
# update_epen(fusion_param, True)
if __name__ == '__main__':
# img_st_fusion()
# retrain_fusion()
# init_strict_img_st_fusion()
# for i in range(10):
# print('iteration %d' % i)
# ctrl_msg['cross_idx'] = i
# # ctrl_msg['data_folder_path'] = 'top-m2g-std%d-r-train' % i
# # fusion_param = get_fusion_param()
# # get_predict_tracks(fusion_param)
# # store_sorted_deltas(fusion_param)
# # ctrl_msg['data_folder_path'] = 'top-m2g-std%d-r-test' % i
# # iter_strict_img_st_fusion(on_test=True)
# ctrl_msg['data_folder_path'] = 'top-m2g-std%d-test' % i
# iter_strict_img_st_fusion(on_test=True)
# # viz fusion curve
# fusion_param = get_fusion_param()
# get_predict_tracks(fusion_param)
# store_sorted_deltas(fusion_param)
#
# print('generate random predict')
# write_rand_pid(fusion_param)
# ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'] + '_rand'
# fusion_param = get_fusion_param()
# gen_rand_st_model(fusion_param)
#
# ctrl_msg['data_folder_path'] = ctrl_msg['data_folder_path'][:-5]
# fusion_param = get_fusion_param()
# ctrl_msg['data_folder_path'] = 'market_market-train'
# fusion_param = get_fusion_param()
# init_strict_img_st_fusion()
# ctrl_msg['data_folder_path'] = 'market_market-test'
# fusion_param = get_fusion_param()
# os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
# os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
# + 'market' + ' ' + fusion_param['eval_fusion_path'])
#
for i in range(0, 4):
for j in range(0, 4 - i):
ctrl_msg['ep'] = i * 0.25
ctrl_msg['en'] = j * 0.25
ctrl_msg['data_folder_path'] = 'grid_market-train'
fusion_param = get_fusion_param()
init_strict_img_st_fusion()
ctrl_msg['data_folder_path'] = 'grid_market-test'
fusion_param = get_fusion_param()
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
+ 'market' + ' ' + fusion_param['eval_fusion_path'])
# ctrl_msg['ep'] = 0.25
# ctrl_msg['en'] = 0.5
# ctrl_msg['data_folder_path'] = 'grid_market-train'
# fusion_param = get_fusion_param()
# init_strict_img_st_fusion()
# ctrl_msg['data_folder_path'] = 'grid_market-test'
# fusion_param = get_fusion_param()
# os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
# os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
# + 'market' + ' ' + fusion_param['eval_fusion_path'])
# ctrl_msg['ep'] = 0.5
# ctrl_msg['en'] = 0.25
# ctrl_msg['data_folder_path'] = 'grid_market-train'
# fusion_param = get_fusion_param()
# init_strict_img_st_fusion()
# ctrl_msg['data_folder_path'] = 'grid_market-test'
# fusion_param = get_fusion_param()
# os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
# os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
# + 'market' + ' ' + fusion_param['eval_fusion_path'])
# for cv_num in range(10):
# for i in range(0, 4):
# for j in range(0, 4 - i):
# ctrl_msg['ep'] = i * 0.25
# ctrl_msg['en'] = j * 0.25
# ctrl_msg['data_folder_path'] = 'market_grid-cv%d-train' % cv_num
# fusion_param = get_fusion_param()
# init_strict_img_st_fusion()
# ctrl_msg['data_folder_path'] = 'market_grid-cv%d-test' % cv_num
# fusion_param = get_fusion_param()
# os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/cuda/lib64')
# os.system('/home/cwh/anaconda2/bin/python /home/cwh/coding/rank-reid/rank_reid.py 2 '
# + ('grid-cv%d' % cv_num) + ' ' + fusion_param['eval_fusion_path'])
# delta_range, raw_probs, rand_probs, over_probs = fusion_curve(fusion_param)
# viz_fusion_curve(delta_range, [raw_probs, rand_probs, over_probs])
# viz smooth dist
# viz_market_distribution(fusion_param)
| 8,198 | 39.191176 | 107 |
py
|
TFusion
|
TFusion-master/rank-reid/rank_reid.py
|
import sys
from baseline.evaluate import market_result_eval, grid_result_eval
from pretrain.eval import train_pair_predict,test_pair_predict, train_rank_predict, test_rank_predict
from transfer.simple_rank_transfer import rank_transfer_2dataset
def get_source_target_info(source, target):
source_model_path = '/home/cwh/coding/rank-reid/pretrain/%s_pair_pretrain.h5' % source
target_dataset_path = ''
if target == 'market':
target_dataset_path = '/home/cwh/coding/Market-1501'
elif target == 'markets1':
target_dataset_path = '/home/cwh/coding/markets1'
elif target == 'duke':
target_dataset_path = '/home/cwh/coding/DukeMTMC-reID'
elif 'grid' in target:
target_dataset_path = '/home/cwh/coding/grid_train_probe_gallery' + target.replace('grid-cv', '/cross')
return source_model_path, target_dataset_path
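# Usage sketch (paths are the hard-coded ones above): for target 'grid-cv0' the
# dataset path resolves to '/home/cwh/coding/grid_train_probe_gallery/cross0',
# next to the pretrained pair model of the chosen source.
def _source_target_info_demo():
    return get_source_target_info('market', 'grid-cv0')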
def vision_predict(source, target, train_pid_path, train_score_path, test_pid_path, test_score_path):
source_model_path, target_dataset_path = get_source_target_info(source, target)
target_probe_path = target_dataset_path + '/probe'
target_train_path = target_dataset_path + '/train'
target_gallery_path = target_dataset_path + '/test'
train_pair_predict(source_model_path, target_train_path, train_pid_path, train_score_path)
test_pair_predict(source_model_path, target_probe_path, target_gallery_path, test_pid_path, test_score_path)
predict_eval(target, test_pid_path)
def rank_transfer(source, target, target_train_list, fusion_train_rank_pids_path, fusion_train_rank_scores_path):
source_model_path, target_dataset_path = get_source_target_info(source, target)
target_train_path = target_dataset_path + '/train'
target_model_path = source + '_' + target + '-rank_transfer.h5'
rank_transfer_2dataset(source_model_path, target_train_list, target_model_path, target_train_path,
fusion_train_rank_pids_path, fusion_train_rank_scores_path)
return target_model_path
def rank_predict(rank_model_path, target, transfer_train_rank_pids_path, transfer_train_rank_scores_path,
transfer_test_rank_pids_path, transfer_test_rank_scores_path):
    # note: 'source' is not a parameter here; it relies on the module-level variable set in __main__
    source_model_path, target_dataset_path = get_source_target_info(source, target)
target_train_path = target_dataset_path + '/train'
target_probe_path = target_dataset_path + '/probe'
target_gallery_path = target_dataset_path + '/test'
# train_rank_predict(rank_model_path, target_train_path, transfer_train_rank_pids_path, transfer_train_rank_scores_path)
test_rank_predict(rank_model_path, target_probe_path, target_gallery_path, transfer_test_rank_pids_path, transfer_test_rank_scores_path)
predict_eval(target, transfer_test_rank_pids_path)
def predict_eval(target, predict_path):
if target == 'market' or target == 'market-r':
market_result_eval(predict_path,
TEST = '/home/cwh/coding/Market-1501/test', QUERY = '/home/cwh/coding/Market-1501/probe')
elif 'grid' in target:
grid_result_eval(predict_path)
elif 'duke' in target:
market_result_eval(predict_path, log_path='duke_eval.log', TEST = '/home/cwh/coding/DukeMTMC-reID/test', QUERY = '/home/cwh/coding/DukeMTMC-reID/probe')
if __name__ == '__main__':
# source = 'cuhk'
# target = 'market'
# fusion_train_rank_pids_path = '/home/cwh/coding/TrackViz/data/%s_%s-train/cross_filter_pid.log' % (source, target)
# fusion_train_rank_scores_path = '/home/cwh/coding/TrackViz/data/%s_%s-train/cross_filter_score.log' % (source, target)
# transfer_train_rank_pids_path = 'train_rank_pid.log'
# transfer_train_rank_scores_path = 'train_rank_score.log'
# transfer_test_rank_pids_path = 'test_rank_pid.log'
# transfer_test_rank_scores_path = 'test_rank_score.log'
# target_train_list ='dataset/market_train.list'
# rank_model_path = rank_transfer(source, target, target_train_list, fusion_train_rank_pids_path,
# fusion_train_rank_scores_path)
# # rank_model_path = '/home/cwh/coding/rank-reid/i_' + source + '_' + target + '-rank_transfer.h5'
# # rank_model_path = 'transfer/rank_transfer_test.h5'
# rank_predict(rank_model_path, target, transfer_train_rank_pids_path, transfer_train_rank_scores_path,
# transfer_test_rank_pids_path, transfer_test_rank_scores_path)
opt = sys.argv[1]
if opt == '0':
source = sys.argv[2]
target = sys.argv[3]
vision_train_rank_pids_path = sys.argv[4]
vision_train_rank_scores_path = sys.argv[5]
vision_test_rank_pids_path = sys.argv[6]
vision_test_rank_scores_path = sys.argv[7]
vision_predict(source, target,
vision_train_rank_pids_path, vision_train_rank_scores_path,
vision_test_rank_pids_path, vision_test_rank_scores_path)
elif opt == '1':
source = sys.argv[2]
target = sys.argv[3]
fusion_train_rank_pids_path = sys.argv[4]
fusion_train_rank_scores_path = sys.argv[5]
transfer_train_rank_pids_path = sys.argv[6]
transfer_train_rank_scores_path = sys.argv[7]
transfer_test_rank_pids_path = sys.argv[8]
transfer_test_rank_scores_path = sys.argv[9]
target_train_list = sys.argv[10]
rank_model_path = rank_transfer(source, target, target_train_list, fusion_train_rank_pids_path, fusion_train_rank_scores_path)
rank_predict(rank_model_path, target, transfer_train_rank_pids_path, transfer_train_rank_scores_path,
transfer_test_rank_pids_path, transfer_test_rank_scores_path)
elif opt == '2':
target = sys.argv[2]
predict_path = sys.argv[3]
predict_eval(target, predict_path)
else:
pass
| 5,831 | 52.018182 | 160 |
py
|
TFusion
|
TFusion-master/rank-reid/transfer/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/rank-reid/transfer/simple_rank_transfer.py
|
import os
import utils.cuda_util
import numpy as np
from keras import Input
from keras import backend as K
from keras.applications.resnet50 import preprocess_input, ResNet50
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.engine import Model
from keras.layers import Flatten, Lambda, Dense, Conv2D
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.utils import plot_model
from numpy.random import randint
from pretrain.pair_train import eucl_dist
from utils.file_helper import safe_remove
def reid_img_prepare(LIST, TRAIN):
images = []
with open(LIST, 'r') as f:
for line in f:
if 'jp' not in line:
continue
line = line.strip()
img = line.split()[0]
img = image.load_img(os.path.join(TRAIN, img), target_size=[224, 224])
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
images.append(img[0])
images = np.array(images)
return images
def gen_neg_right_img_ids(left_similar_persons, left_similar_matrix, batch_size):
right_img_ids = list()
right_img_idxes = randint(25, 50, size=batch_size)
right_img_scores = list()
for i in range(batch_size):
right_img_ids.append(left_similar_persons[i][right_img_idxes[i]])
right_img_scores.append(left_similar_matrix[i][right_img_idxes[i]])
right_img_ids = np.array(right_img_ids)
return right_img_ids, np.array(right_img_scores)
def gen_pos_right_img_ids(left_similar_persons, left_similar_matrix, batch_size):
right_img_ids = list()
right_img_idxes = randint(0, 25, size=batch_size)
right_img_scores = list()
for i in range(batch_size):
right_img_ids.append(left_similar_persons[i][right_img_idxes[i]])
right_img_scores.append(left_similar_matrix[i][right_img_idxes[i]])
right_img_ids = np.array(right_img_ids)
return right_img_ids, np.array(right_img_scores)
def gen_right_img_infos(cur_epoch, similar_matrix, similar_persons, left_img_ids, img_cnt, batch_size):
pos_prop = 2
if cur_epoch % pos_prop == 0:
# select from last match for negative
left_similar_persons = similar_persons[left_img_ids]
left_similar_matrix = similar_matrix[left_img_ids]
right_img_ids, right_img_scores = gen_pos_right_img_ids(left_similar_persons, left_similar_matrix, batch_size)
else:
# select from last match for negative
left_similar_persons = similar_persons[left_img_ids]
left_similar_matrix = similar_matrix[left_img_ids]
right_img_ids, right_img_scores = gen_neg_right_img_ids(left_similar_persons, left_similar_matrix, batch_size)
right_img_ids = right_img_ids.astype(int)
return right_img_ids, right_img_scores
def triplet_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=False):
cur_epoch = 0
img_cnt = len(similar_persons)
while True:
left_img_ids = randint(img_cnt, size=batch_size)
right_img_ids1, right_img_scores1 = gen_right_img_infos(cur_epoch,
similar_matrix, similar_persons,
left_img_ids,
img_cnt, batch_size)
cur_epoch += 1
right_img_ids2, right_img_scores2 = gen_right_img_infos(cur_epoch,
similar_matrix, similar_persons,
left_img_ids,
img_cnt, batch_size)
left_images = train_images[left_img_ids]
right_images1 = train_images[right_img_ids1]
right_images2 = train_images[right_img_ids2]
sub_scores = np.subtract(right_img_scores1, right_img_scores2) # * 10
cur_epoch += 1
# print cur_epoch
if (cur_epoch/2) % 2 == 0:
# print sub_scores
# yield [left_images, right_images1, right_images2], [sub_scores, right_img_scores1, right_img_scores2]
yield [left_images, right_images1, right_images2], [sub_scores]
else:
# print -sub_scores
# yield [left_images, right_images2, right_images1], [-sub_scores, right_img_scores2, right_img_scores1]
yield [left_images, right_images2, right_images1], [-sub_scores]
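# Usage sketch: pulling one batch from the generator above, assuming train_images,
# similar_persons and similar_matrix were loaded as in rank_transfer_2dataset below.
def _peek_triplet_batch_demo(generator):
    inputs, targets = next(generator)
    left, right1, right2 = inputs
    # each input is (batch_size, 224, 224, 3); targets[0] holds the score differences
    return left.shape, right1.shape, right2.shape, targets[0].shape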
def sub(inputs):
x, y = inputs
return (x - y) # *10
def cross_entropy_loss(real_score, predict_score):
predict_prob = 1 / (1 + K.exp(-predict_score))
real_prob = 1 / (1 + K.exp(-real_score))
cross_entropy = -real_prob * K.log(predict_prob) - (1 - real_prob) * K.log(1 - predict_prob)
return cross_entropy
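# NumPy sketch of the loss above for plain numbers (illustrative only): both the
# real and the predicted score are squashed with a sigmoid, then a soft-target
# binary cross-entropy is taken between the two probabilities.
def _cross_entropy_demo(real_score, predict_score):
    predict_prob = 1.0 / (1.0 + np.exp(-predict_score))
    real_prob = 1.0 / (1.0 + np.exp(-real_score))
    return -real_prob * np.log(predict_prob) - (1 - real_prob) * np.log(1 - predict_prob)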
def rank_transfer_model(pair_model_path):
pair_model = load_model(pair_model_path)
base_model = pair_model.layers[2]
base_model = Model(inputs=base_model.get_input_at(0), outputs=[base_model.get_output_at(0)], name='resnet50')
# base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
# base_model = Model(inputs=[base_model.input], outputs=[base_model.output], name='resnet50')
for layer in base_model.layers[: len(base_model.layers)/3*2]:
layer.trainable = False
print 'to layer: %d' % (len(base_model.layers)/3*2)
img0 = Input(shape=(224, 224, 3), name='img_0')
img1 = Input(shape=(224, 224, 3), name='img_1')
img2 = Input(shape=(224, 224, 3), name='img_2')
feature0 = Flatten()(base_model(img0))
feature1 = Flatten()(base_model(img1))
feature2 = Flatten()(base_model(img2))
dis1 = Lambda(eucl_dist, name='square1')([feature0, feature1])
dis2 = Lambda(eucl_dist, name='square2')([feature0, feature2])
score1 = Dense(1, activation='sigmoid', name='score1')(dis1)
score2 = Dense(1, activation='sigmoid', name='score2')(dis2)
sub_score = Lambda(sub, name='sub_score')([score1, score2])
model = Model(inputs=[img0, img1, img2], outputs=[sub_score])
# model = Model(inputs=[img0, img1, img2], outputs=[sub_score])
model.get_layer('score1').set_weights(pair_model.get_layer('bin_out').get_weights())
model.get_layer('score2').set_weights(pair_model.get_layer('bin_out').get_weights())
plot_model(model, to_file='rank_model.png')
print(model.summary())
return model
def rank_transfer(train_generator, val_generator, source_model_path, target_model_path, batch_size=48):
model = rank_transfer_model(source_model_path)
plot_model(model, 'rank_model.png')
model.compile(
optimizer=SGD(lr=0.001, momentum=0.9), # 'adam',
# optimizer='adam',
loss={
'sub_score': cross_entropy_loss,
#'score1': 'binary_crossentropy',
#'score2': 'binary_crossentropy'
# 'sub_score': 'mse'
},
loss_weights={
'sub_score': 1,
# 'score1': 0.5,
# 'score2': 0.5
},
# metrics=['accuracy']
)
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
auto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, verbose=0, mode='auto', epsilon=0.0001,
cooldown=0, min_lr=0)
if 'market-' in target_model_path:
train_data_cnt = 16500
val_data_cnt = 1800
else:
train_data_cnt = 1600
val_data_cnt = 180
model.fit_generator(train_generator,
steps_per_epoch=train_data_cnt / batch_size + 1,
epochs=5,
validation_data=val_generator,
validation_steps=val_data_cnt / batch_size + 1,
callbacks=[
early_stopping,
auto_lr
]
)
safe_remove(target_model_path)
# model.save('simple_rank_transfer.h5')
model.save(target_model_path)
def rank_transfer_2market():
DATASET = '../dataset/Market'
LIST = os.path.join(DATASET, 'pretrain.list')
TRAIN = os.path.join(DATASET, 'bounding_box_train')
train_images = reid_img_prepare(LIST, TRAIN)
batch_size = 16
# similar_persons = np.genfromtxt('../pretrain/train_renew_pid.log', delimiter=' ')
# similar_matrix = np.genfromtxt('../pretrain/train_renew_ac.log', delimiter=' ')
similar_persons = np.genfromtxt('../pretrain/cross_filter_pid.log', delimiter=' ') - 1
similar_matrix = np.genfromtxt('../pretrain/cross_filter_score.log', delimiter=' ')
rank_transfer(
triplet_generator_by_rank_list(train_images[: len(train_images)*9/10], batch_size, similar_persons, similar_matrix, train=True),
triplet_generator_by_rank_list(train_images[len(train_images)*9/10:], batch_size, similar_persons, similar_matrix, train=False),
'../pretrain/pair_pretrain.h5',
'market2grid.h5',
batch_size=batch_size
)
def rank_transfer_2dataset(source_pair_model_path, target_train_list, target_model_path, target_train_path,
rank_pid_path, rank_score_path):
train_images = reid_img_prepare(target_train_list, target_train_path)
batch_size = 16
similar_persons = np.genfromtxt(rank_pid_path, delimiter=' ')
# if 'cross' in rank_pid_path:
# similar_persons = similar_persons - 1
similar_matrix = np.genfromtxt(rank_score_path, delimiter=' ')
rank_transfer(
triplet_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=True),
triplet_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=False),
source_pair_model_path,
target_model_path,
batch_size=batch_size
)
def two_stage_rank_transfer_2dataset(source_pair_model_path, target_train_list, target_model_path, target_train_path,
rank_pid_path, rank_score_path):
train_images = reid_img_prepare(target_train_list, target_train_path)
batch_size = 16
similar_persons = np.genfromtxt(rank_pid_path, delimiter=' ')
# if 'cross' in rank_pid_path:
# similar_persons = similar_persons - 1
similar_matrix = np.genfromtxt(rank_score_path, delimiter=' ')
rank_transfer(
triplet_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=True),
triplet_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=False),
source_pair_model_path,
target_model_path,
batch_size=batch_size
)
if __name__ == '__main__':
pair_model = load_model('../pretrain/cuhk_pair_pretrain.h5')
base_model = pair_model.layers[2]
base_model = Model(inputs=base_model.get_input_at(0), outputs=[base_model.get_output_at(0)], name='resnet50')
print isinstance(base_model.layers[-20], Conv2D)
rank_transfer_2dataset('../pretrain/cuhk_pair_pretrain.h5', '../dataset/market_train.list',
'rank_transfer_test.h5',
'/home/cwh/coding/Market-1501/train',
'/home/cwh/coding/TrackViz/data/cuhk_market-train/cross_filter_pid.log',
'/home/cwh/coding/TrackViz/data/cuhk_market-train/cross_filter_score.log')
| 11,654 | 43.484733 | 136 |
py
|
TFusion
|
TFusion-master/rank-reid/baseline/evaluate.py
|
from __future__ import division, print_function, absolute_import
import os
import numpy as np
import tensorflow as tf
from keras.applications.resnet50 import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
from utils.file_helper import write, safe_remove
def extract_info(dir_path):
infos = []
for image_name in sorted(os.listdir(dir_path)):
if '.txt' in image_name:
continue
if 's' in image_name or 'f' in image_name:
# market && duke
arr = image_name.split('_')
person = int(arr[0])
camera = int(arr[1][1])
elif 's' not in image_name:
# grid
arr = image_name.split('_')
person = int(arr[0])
camera = int(arr[1])
else:
continue
infos.append((person, camera))
return infos
def extract_feature(dir_path, net):
features = []
infos = []
for image_name in sorted(os.listdir(dir_path)):
if '.txt' in image_name:
continue
if 'f' in image_name or 's' in image_name:
arr = image_name.split('_')
person = int(arr[0])
camera = int(arr[1][1])
elif 's' not in image_name:
# grid
arr = image_name.split('_')
person = int(arr[0])
camera = int(arr[1])
else:
continue
image_path = os.path.join(dir_path, image_name)
img = image.load_img(image_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
feature = net.predict(x)
features.append(np.squeeze(feature))
infos.append((person, camera))
return features, infos
def similarity_matrix(query_f, test_f):
# Tensorflow graph
# use GPU to calculate the similarity matrix
query_t = tf.placeholder(tf.float32, (None, None))
test_t = tf.placeholder(tf.float32, (None, None))
query_t_norm = tf.nn.l2_normalize(query_t, dim=1)
test_t_norm = tf.nn.l2_normalize(test_t, dim=1)
tensor = tf.matmul(query_t_norm, test_t_norm, transpose_a=False, transpose_b=True)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
result = sess.run(tensor, {query_t: query_f, test_t: test_f})
print(result.shape)
# descend
return result
def sort_similarity(query_f, test_f):
result = similarity_matrix(query_f, test_f)
result_argsort = np.argsort(-result, axis=1)
return result, result_argsort
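# NumPy sketch of the ranking above (same math as the TensorFlow graph, for
# illustration only): L2-normalise both feature sets, take the dot-product
# similarity matrix and argsort each row in descending order.
def _sort_similarity_numpy_demo(query_f, test_f):
    q = np.asarray(query_f, dtype=np.float32)
    t = np.asarray(test_f, dtype=np.float32)
    q = q / np.sqrt((q ** 2).sum(axis=1, keepdims=True))
    t = t / np.sqrt((t ** 2).sum(axis=1, keepdims=True))
    result = np.dot(q, t.T)
    return result, np.argsort(-result, axis=1)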
def map_rank_quick_eval(query_info, test_info, result_argsort):
    # much faster than hehefan's evaluation
match = []
junk = []
QUERY_NUM = len(query_info)
for q_index, (qp, qc) in enumerate(query_info):
tmp_match = []
tmp_junk = []
for t_index in range(len(test_info)):
p_t_idx = result_argsort[q_index][t_index]
p_info = test_info[int(p_t_idx)]
tp = p_info[0]
tc = p_info[1]
if tp == qp and qc != tc:
tmp_match.append(t_index)
elif tp == qp or tp == -1:
tmp_junk.append(t_index)
match.append(tmp_match)
junk.append(tmp_junk)
rank_1 = 0.0
mAP = 0.0
rank1_list = list()
for idx in range(len(query_info)):
if idx % 100 == 0:
print('evaluate img %d' % idx)
recall = 0.0
precision = 1.0
ap = 0.0
YES = match[idx]
IGNORE = junk[idx]
ig_cnt = 0
for ig in IGNORE:
if ig < YES[0]:
ig_cnt += 1
else:
break
if ig_cnt >= YES[0]:
rank_1 += 1
rank1_list.append(1)
else:
rank1_list.append(0)
for i, k in enumerate(YES):
ig_cnt = 0
for ig in IGNORE:
if ig < k:
ig_cnt += 1
else:
break
cnt = k + 1 - ig_cnt
hit = i + 1
tmp_recall = hit / len(YES)
tmp_precision = hit / cnt
ap = ap + (tmp_recall - recall) * ((precision + tmp_precision) / 2)
recall = tmp_recall
precision = tmp_precision
mAP += ap
rank1_acc = rank_1 / QUERY_NUM
mAP = mAP / QUERY_NUM
print('Rank 1:\t%f' % rank1_acc)
print('mAP:\t%f' % mAP)
np.savetxt('rank_1.log', np.array(rank1_list), fmt='%d')
return rank1_acc, mAP
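# Sketch of the junk rule used above, with made-up rank positions: a probe counts
# as a rank-1 hit when every gallery entry placed before its first true match is
# junk (same person under the same camera, or a distractor).
def _rank1_hit_demo(first_match_rank, junk_ranks):
    # e.g. _rank1_hit_demo(2, [0, 1]) -> True, _rank1_hit_demo(2, [0]) -> False
    ig_cnt = 0
    for ig in sorted(junk_ranks):
        if ig < first_match_rank:
            ig_cnt += 1
        else:
            break
    return ig_cnt >= first_match_rank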
def train_predict(net, train_path, pid_path, score_path):
net = Model(inputs=[net.input], outputs=[net.get_layer('avg_pool').output])
train_f, test_info = extract_feature(train_path, net)
result, result_argsort = sort_similarity(train_f, train_f)
for i in range(len(result)):
result[i] = result[i][result_argsort[i]]
result = np.array(result)
    # ignore the top-1 column because it is the image itself
np.savetxt(score_path, result[:, 1:], fmt='%.4f')
np.savetxt(pid_path, result_argsort[:, 1:], fmt='%d')
return result
def test_predict(net, probe_path, gallery_path, pid_path, score_path):
net = Model(inputs=[net.input], outputs=[net.get_layer('avg_pool').output])
test_f, test_info = extract_feature(gallery_path, net)
query_f, query_info = extract_feature(probe_path, net)
result, result_argsort = sort_similarity(query_f, test_f)
for i in range(len(result)):
result[i] = result[i][result_argsort[i]]
result = np.array(result)
safe_remove(pid_path)
safe_remove(score_path)
np.savetxt(pid_path, result_argsort, fmt='%d')
np.savetxt(score_path, result, fmt='%.4f')
def market_result_eval(predict_path, log_path='market_result_eval.log', TEST='Market-1501/test',
QUERY='Market-1501/probe'):
res = np.genfromtxt(predict_path, delimiter=' ')
print('predict info get, extract gallery info start')
test_info = extract_info(TEST)
print('extract probe info start')
query_info = extract_info(QUERY)
print('start evaluate map and rank acc')
rank1, mAP = map_rank_quick_eval(query_info, test_info, res)
write(log_path, predict_path + '\n')
write(log_path, '%f\t%f\n' % (rank1, mAP))
def grid_result_eval(predict_path, log_path='grid_eval.log'):
pids4probes = np.genfromtxt(predict_path, delimiter=' ')
probe_shoot = [0, 0, 0, 0, 0]
for i, pids in enumerate(pids4probes):
for j, pid in enumerate(pids):
if pid - i == 775:
if j == 0:
for k in range(5):
probe_shoot[k] += 1
elif j < 5:
for k in range(1, 5):
probe_shoot[k] += 1
elif j < 10:
for k in range(2, 5):
probe_shoot[k] += 1
elif j < 20:
for k in range(3, 5):
probe_shoot[k] += 1
elif j < 50:
for k in range(4, 5):
probe_shoot[k] += 1
break
probe_acc = [shoot / len(pids4probes) for shoot in probe_shoot]
write(log_path, predict_path + '\n')
write(log_path, '%.2f\t%.2f\t%.2f\n' % (probe_acc[0], probe_acc[1], probe_acc[2]))
print(predict_path)
print(probe_acc)
if __name__ == '__main__':
market_result_eval('cross_filter_pid.log')
| 7,555 | 31.568966 | 96 |
py
|
TFusion
|
TFusion-master/rank-reid/baseline/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/rank-reid/baseline/train.py
|
from __future__ import division, print_function, absolute_import
import os
from random import shuffle
import numpy as np
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.initializers import RandomNormal
from keras.layers import Dense, Flatten, Dropout
from keras.layers import Input
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
def load_mix_data(LIST, TRAIN):
images, labels = [], []
with open(LIST, 'r') as f:
last_label = -1
label_cnt = -1
last_type = ''
for line in f:
line = line.strip()
img = line
lbl = line.split('_')[0]
cur_type = line.split('.')[-1]
if last_label != lbl or last_type != cur_type:
label_cnt += 1
last_label = lbl
last_type = cur_type
img = image.load_img(os.path.join(TRAIN, img), target_size=[224, 224])
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
images.append(img[0])
labels.append(label_cnt)
img_cnt = len(labels)
shuffle_idxes = range(img_cnt)
shuffle(shuffle_idxes)
shuffle_imgs = list()
shuffle_labels = list()
for idx in shuffle_idxes:
shuffle_imgs.append(images[idx])
shuffle_labels.append(labels[idx])
images = np.array(shuffle_imgs)
labels = to_categorical(shuffle_labels)
return images, labels
def load_data(LIST, TRAIN):
images, labels = [], []
with open(LIST, 'r') as f:
last_label = -1
label_cnt = -1
for line in f:
line = line.strip()
img = line
lbl = line.split('_')[0]
if last_label != lbl:
label_cnt += 1
last_label = lbl
img = image.load_img(os.path.join(TRAIN, img), target_size=[224, 224])
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
images.append(img[0])
labels.append(label_cnt)
img_cnt = len(labels)
shuffle_idxes = range(img_cnt)
shuffle(shuffle_idxes)
shuffle_imgs = list()
shuffle_labels = list()
for idx in shuffle_idxes:
shuffle_imgs.append(images[idx])
shuffle_labels.append(labels[idx])
images = np.array(shuffle_imgs)
labels = to_categorical(shuffle_labels)
return images, labels
def softmax_model_pretrain(train_list, train_dir, class_count, target_model_path):
images, labels = load_data(train_list, train_dir)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
# load pre-trained resnet50
base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
x = base_model.output
x = Flatten(name='flatten')(x)
x = Dropout(0.5)(x)
x = Dense(class_count, activation='softmax', name='fc8', kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(x)
net = Model(inputs=[base_model.input], outputs=[x])
for layer in net.layers:
layer.trainable = True
# pretrain
batch_size = 16
train_datagen = ImageDataGenerator(
shear_range=0.2,
width_shift_range=0.2, # 0.
height_shift_range=0.2)
net.compile(optimizer=SGD(lr=0.001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
net.fit_generator(
train_datagen.flow(images, labels, batch_size=batch_size),
steps_per_epoch=len(images) / batch_size + 1, epochs=40,
)
net.save(target_model_path)
def softmax_pretrain_on_dataset(source, project_path='/home/cwh/coding/rank-reid', dataset_parent='/home/cwh/coding'):
if source == 'market':
train_list = project_path + '/dataset/market_train.list'
train_dir = dataset_parent + '/Market-1501/train'
class_count = 751
elif source == 'grid':
train_list = project_path + '/dataset/grid_train.list'
train_dir = dataset_parent + '/grid_label'
class_count = 250
elif source == 'cuhk':
train_list = project_path + '/dataset/cuhk_train.list'
train_dir = dataset_parent + '/cuhk01'
class_count = 971
elif source == 'viper':
train_list = project_path + '/dataset/viper_train.list'
train_dir = dataset_parent + '/viper'
class_count = 630
elif source == 'duke':
train_list = project_path + '/dataset/duke_train.list'
train_dir = dataset_parent + '/DukeMTMC-reID/train'
class_count = 702
elif 'grid-cv' in source:
cv_idx = int(source.split('-')[-1])
train_list = project_path + '/dataset/grid-cv/%d.list' % cv_idx
train_dir = dataset_parent + '/underground_reid/cross%d/train' % cv_idx
class_count = 125
elif 'mix' in source:
train_list = project_path + '/dataset/mix.list'
train_dir = dataset_parent + '/cuhk_grid_viper_mix'
class_count = 250 + 971 + 630
else:
train_list = 'unknown'
train_dir = 'unknown'
class_count = -1
softmax_model_pretrain(train_list, train_dir, class_count, '../pretrain/' + source + '_softmax_pretrain.h5')
if __name__ == '__main__':
# sources = ['market', 'grid', 'cuhk', 'viper']
sources = ['market']
for source in sources:
softmax_pretrain_on_dataset(source)
| 5,745 | 33.407186 | 120 |
py
|
TFusion
|
TFusion-master/rank-reid/pretrain/pair_train.py
|
import os
import numpy as np
from keras import Input
from keras import backend as K
from keras.applications.resnet50 import preprocess_input
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.engine import Model
from keras.layers import Lambda, Dense, Dropout, Flatten
from keras.models import load_model
from keras.optimizers import SGD
from keras.preprocessing import image
from keras.utils import plot_model, to_categorical
from numpy.random import randint, shuffle, choice
from baseline.train import softmax_pretrain_on_dataset
def mix_data_prepare(data_list_path, train_dir_path):
class_img_labels = dict()
class_cnt = -1
last_label = -2
last_type = ''
with open(data_list_path, 'r') as f:
for line in f:
line = line.strip()
img = line
lbl = int(line.split('_')[0])
img_type = line.split('.')[-1]
if lbl != last_label or img_type != last_type:
class_cnt = class_cnt + 1
cur_list = list()
class_img_labels[str(class_cnt)] = cur_list
last_label = lbl
last_type = img_type
img = image.load_img(os.path.join(train_dir_path, img), target_size=[224, 224])
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
class_img_labels[str(class_cnt)].append(img[0])
return class_img_labels
def reid_data_prepare(data_list_path, train_dir_path):
if 'mix' in data_list_path:
return mix_data_prepare(data_list_path, train_dir_path)
class_img_labels = dict()
class_cnt = -1
last_label = -2
with open(data_list_path, 'r') as f:
for line in f:
line = line.strip()
img = line
lbl = int(line.split('_')[0])
if lbl != last_label:
class_cnt = class_cnt + 1
cur_list = list()
class_img_labels[str(class_cnt)] = cur_list
last_label = lbl
img = image.load_img(os.path.join(train_dir_path, img), target_size=[224, 224])
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
class_img_labels[str(class_cnt)].append(img[0])
return class_img_labels
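# -- descriptive note: pair_generator below yields Siamese training batches
#    indefinitely. Every `pos_prop`-th batch reuses the same identities for the
#    left and right images (positive pairs); the other batches shuffle the
#    right-hand identities, so most pairs are negatives. Each yield is
#    ([left_images, right_images],
#     [left_onehot, right_onehot, same_identity_binary_label]),
#    and with train=False only the last 10% of each identity's images is used.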
def pair_generator(class_img_labels, batch_size, train=False):
cur_epoch = 0
pos_prop = 5
while True:
left_label = randint(len(class_img_labels), size=batch_size)
if cur_epoch % pos_prop == 0:
right_label = left_label
else:
right_label = np.copy(left_label)
shuffle(right_label)
# select by label
left_images = list()
right_images = list()
if train:
slice_start = 0
else:
# val
slice_start = 0.9
for i in range(batch_size):
len_left_label_i = len(class_img_labels[str(left_label[i])])
left_images.append(class_img_labels[str(left_label[i])][int(slice_start * len_left_label_i):][
choice(len_left_label_i - int(len_left_label_i * slice_start))])
len_right_label_i = len(class_img_labels[str(right_label[i])])
right_images.append(class_img_labels[str(right_label[i])][int(slice_start * len_right_label_i):][
choice(len_right_label_i - int(len_right_label_i * slice_start))])
left_images = np.array(left_images)
right_images = np.array(right_images)
binary_label = (left_label == right_label).astype(int)
left_label = to_categorical(left_label, num_classes=len(class_img_labels))
right_label = to_categorical(right_label, num_classes=len(class_img_labels))
cur_epoch += 1
yield [left_images, right_images], [left_label, right_label, binary_label]
def eucl_dist(inputs):
x, y = inputs
# return K.mean(K.square((x - y)), axis=1)
return K.square((x - y))
def dis_sigmoid(dis):
return K.expand_dims(2/(1+K.exp(dis)))
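# -- descriptive note: pair_model builds a Siamese network on top of a trained
#    softmax model. Both inputs share the ResNet50 trunk; each branch keeps its
#    own identity classifier (initialised from the softmax model's 'fc8'
#    weights), and a binary 'same identity' head is learned on the element-wise
#    squared difference of the two feature vectors.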
def pair_model(source_model_path, num_classes):
softmax_model = load_model(source_model_path)
# base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
base_model = Model(inputs=softmax_model.input, outputs=[softmax_model.get_layer('avg_pool').output], name='resnet50')
img1 = Input(shape=(224, 224, 3), name='img_1')
img2 = Input(shape=(224, 224, 3), name='img_2')
feature1 = Flatten()(base_model(img1))
feature2 = Flatten()(base_model(img2))
dis = Lambda(eucl_dist, name='square')([feature1, feature2])
# judge = Lambda(dis_sigmoid, name='bin_out')(dis)
judge = Dense(1, activation='sigmoid', name='bin_out')(Dropout(0.9)(dis))
category_predict1 = Dense(num_classes, activation='softmax', name='ctg_out_1')(
Dropout(0.9)(feature1)
)
category_predict2 = Dense(num_classes, activation='softmax', name='ctg_out_2')(
Dropout(0.9)(feature2)
)
model = Model(inputs=[img1, img2], outputs=[category_predict1, category_predict2, judge])
model.get_layer('ctg_out_1').set_weights(softmax_model.get_layer('fc8').get_weights())
model.get_layer('ctg_out_2').set_weights(softmax_model.get_layer('fc8').get_weights())
plot_model(model, to_file='model_combined.png')
# for layer in base_model.layers[:-10]:
# layer.trainable = False
for layer in base_model.layers:
layer.trainable = True
return model
def common_lr(epoch):
if epoch < 20:
return 0.01
else:
return 0.001
def pair_tune(source_model_path, train_generator, val_generator, tune_dataset, batch_size=48, num_classes=751):
model = pair_model(source_model_path, num_classes)
model.compile(optimizer=SGD(lr=0.001, momentum=0.9),
loss={'ctg_out_1': 'categorical_crossentropy',
'ctg_out_2': 'categorical_crossentropy',
'bin_out': 'binary_crossentropy'},
loss_weights={
'ctg_out_1': 0.5,
'ctg_out_2': 0.5,
'bin_out': 1.
},
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=4)
auto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=0, mode='auto', epsilon=0.0001,
cooldown=0, min_lr=0)
# save_model = ModelCheckpoint('resnet50-{epoch:02d}-{val_ctg_out_1_acc:.2f}.h5', period=2)
model.fit_generator(train_generator,
                        steps_per_epoch=16500 // batch_size + 1,
epochs=20,
validation_data=val_generator,
                        validation_steps=1800 // batch_size + 1,
callbacks=[auto_lr, early_stopping])
model.save(tune_dataset + '_pair_pretrain.h5')
def pair_pretrain_on_dataset(source, project_path='/home/cwh/coding/rank-reid', dataset_parent='/home/cwh/coding'):
if source == 'market':
train_list = project_path + '/dataset/market_train.list'
train_dir = dataset_parent + '/Market-1501/train'
class_count = 751
elif source == 'markets1':
train_list = project_path + '/dataset/markets1_train.list'
train_dir = dataset_parent + '/markets1'
class_count = 751
elif source == 'grid':
train_list = project_path + '/dataset/grid_train.list'
train_dir = dataset_parent + '/grid_label'
class_count = 250
elif source == 'cuhk':
train_list = project_path + '/dataset/cuhk_train.list'
train_dir = dataset_parent + '/cuhk01'
class_count = 971
elif source == 'viper':
train_list = project_path + '/dataset/viper_train.list'
train_dir = dataset_parent + '/viper'
class_count = 630
elif source == 'duke':
train_list = project_path + '/dataset/duke_train.list'
train_dir = dataset_parent + '/DukeMTMC-reID/train'
class_count = 702
elif 'grid-cv' in source:
cv_idx = int(source.split('-')[-1])
train_list = project_path + '/dataset/grid-cv/%d.list' % cv_idx
train_dir = dataset_parent + '/grid_train_probe_gallery/cross%d/train' % cv_idx
class_count = 125
elif 'mix' in source:
train_list = project_path + '/dataset/mix.list'
train_dir = dataset_parent + '/cuhk_grid_viper_mix'
class_count = 250 + 971 + 630
else:
train_list = 'unknown'
train_dir = 'unknown'
class_count = -1
class_img_labels = reid_data_prepare(train_list, train_dir)
batch_size = 16
pair_tune(
source + '_softmax_pretrain.h5',
pair_generator(class_img_labels, batch_size=batch_size, train=True),
pair_generator(class_img_labels, batch_size=batch_size, train=False),
source,
batch_size=batch_size, num_classes=class_count
)
if __name__ == '__main__':
    # sources = ['cuhk_grid_viper_mix']
    sources = ['cuhk', 'viper', 'market', 'duke']
for source in sources:
softmax_pretrain_on_dataset(source,
project_path='/home/cwh/coding/rank-reid',
dataset_parent='/home/cwh/coding/')
pair_pretrain_on_dataset(source)
sources = ['grid-cv-%d' % i for i in range(10)]
for source in sources:
softmax_pretrain_on_dataset(source,
project_path='/home/cwh/coding/rank-reid',
dataset_parent='/home/cwh/coding')
pair_pretrain_on_dataset(source,
project_path='/home/cwh/coding/rank-reid',
dataset_parent='/home/cwh/coding')
# sources = ['viper']
# for source in sources:
# # softmax_pretrain_on_dataset(source,
# # project_path='/home/cwh/coding/rank-reid',
# # dataset_parent='/home/cwh/coding/')
# pair_pretrain_on_dataset(source)
# sources = ['grid-cv-%d' % i for i in range(10)]
# for source in sources:
# softmax_pretrain_on_dataset(source,
# project_path='/home/cwh/coding/rank-reid',
# dataset_parent='/home/cwh/coding')
# pair_pretrain_on_dataset(source,
# project_path='/home/cwh/coding/rank-reid',
# dataset_parent='/home/cwh/coding')
| 10,663 | 40.173745 | 121 |
py
|
TFusion
|
TFusion-master/rank-reid/pretrain/eval.py
|
# coding=utf-8
import os
from keras import backend as K
from keras.engine import Model
from keras.models import load_model
from keras.preprocessing import image
from baseline.evaluate import train_predict, test_predict, grid_result_eval, market_result_eval
from transfer.simple_rank_transfer import cross_entropy_loss
#
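# -- descriptive note: the *_predict helpers below load a trained pair/rank
#    model, rebuild a plain feature extractor from its shared 'resnet50'
#    sub-model, and delegate to train_predict / test_predict for scoring.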
def train_pair_predict(pair_model_path, target_train_path, pid_path, score_path):
model = load_model(pair_model_path)
model = Model(inputs=[model.get_layer('resnet50').get_input_at(0)],
outputs=[model.get_layer('resnet50').get_output_at(0)])
train_predict(model, target_train_path, pid_path, score_path)
def test_pair_predict(pair_model_path, target_probe_path, target_gallery_path, pid_path, score_path):
# todo
model = load_model(pair_model_path)
# model = ResNet50(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))
model = Model(inputs=[model.get_layer('resnet50').get_input_at(0)],
outputs=[model.get_layer('resnet50').get_output_at(0)])
# model = Model(inputs=[model.input], outputs=[model.get_layer('avg_pool').output])
test_predict(model, target_probe_path, target_gallery_path, pid_path, score_path)
def extract_imgs(dir_path):
imgs = []
for image_name in sorted(os.listdir(dir_path)):
if '.txt' in image_name:
continue
if 's' not in image_name:
# grid
arr = image_name.split('_')
person = int(arr[0])
camera = int(arr[1])
elif 's' in image_name:
# market
arr = image_name.split('_')
person = int(arr[0])
camera = int(arr[1][1])
else:
continue
image_path = os.path.join(dir_path, image_name)
img = image.load_img(image_path, target_size=(224, 224))
x = image.img_to_array(img)
imgs.append(x)
return imgs
def tf_eucl_dist(inputs):
x, y = inputs
return K.square((x - y))
def avg_eucl_dist(inputs):
x, y = inputs
return K.mean(K.square((x - y)), axis=1)
def train_rank_predict(rank_model_path, target_train_path, pid_path, score_path):
model = load_model(rank_model_path, custom_objects={'cross_entropy_loss': cross_entropy_loss})
model = Model(inputs=[model.get_layer('resnet50').get_input_at(0)],
outputs=[model.get_layer('resnet50').get_output_at(0)])
train_predict(model, target_train_path, pid_path, score_path)
def test_rank_predict(rank_model_path, target_probe_path, target_gallery_path, pid_path, score_path):
model = load_model(rank_model_path, custom_objects={'cross_entropy_loss': cross_entropy_loss})
model = Model(inputs=[model.get_layer('resnet50').get_input_at(0)],
outputs=[model.get_layer('resnet50').get_output_at(0)])
test_predict(model, target_probe_path, target_gallery_path, pid_path, score_path)
def grid_eval(source, transform_dir):
target = 'grid'
for i in range(10):
test_pair_predict(source + '_pair_pretrain.h5',
transform_dir + 'cross%d' % i + '/probe', transform_dir + 'cross%d' % i + '/test',
source + '_' + target + '_pid.log', source + '_' + target + '_score.log')
grid_result_eval(source + '_' + target + '_pid.log', 'gan.log')
def market_eval(source, transform_dir):
target = 'market'
test_pair_predict(source + '_pair_pretrain.h5',
transform_dir + '/probe', transform_dir + '/test',
source + '_' + target + '_pid.log', source + '_' + target + '_score.log')
if __name__ == '__main__':
# market_eval('market', '/home/cwh/coding/Market-1501')
# market_result_eval('market_market_pid.log',
# TEST='/home/cwh/coding/Market-1501/test',
# QUERY='/home/cwh/coding/Market-1501/probe')
# grid_eval('market', '/home/cwh/coding/grid_train_probe_gallery/cross0')
grid_result_eval('/home/cwh/coding/TrackViz/data/market_grid-cv0-test/cross_filter_pid.log')
| 4,102 | 38.07619 | 108 |
py
|
TFusion
|
TFusion-master/rank-reid/pretrain/__init__.py
| 0 | 0 | 0 |
py
|
|
TFusion
|
TFusion-master/rank-reid/utils/file_helper.py
|
import os
def write_line(path, content):
with open(path, "a+") as dst_file:
dst_file.write(content + '\n')
def write(path, content):
with open(path, "a+") as dst_file:
dst_file.write(content)
def read_lines(path):
with open(path) as f:
content = list()
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
f.close()
continue
if not lines:
break
for line in lines:
content.append(line)
return content
def read_lines_and(path, on_line):
with open(path) as f:
content = list()
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
f.close()
continue
if not lines:
break
for line in lines:
on_line(line)
return content
def read_lines_idx_and(path, on_line):
line_idx = 0
with open(path) as f:
content = list()
while 1:
try:
lines = f.readlines(100)
except UnicodeDecodeError:
f.close()
continue
if not lines:
break
for line in lines:
on_line(line, line_idx)
line_idx += 1
return content
def safe_remove(path):
if os.path.exists(path):
os.remove(path)
return True
else:
return False
def safe_mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
| 1,627 | 20.706667 | 40 |
py
|
TFusion
|
TFusion-master/rank-reid/utils/cuda_util.py
|
import os
from keras.backend import set_session
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.6
set_session(tf.Session(config=config))
| 302 | 26.545455 | 64 |
py
|
TFusion
|
TFusion-master/rank-reid/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
hyperopt
|
hyperopt-master/setup.py
|
import re
import setuptools
with open("hyperopt/__init__.py", encoding="utf8") as f:
    match = re.search(r"__version__ = \"(.*?)\"", f.read())
if match is None:
    raise ImportError("Could not find __version__ in hyperopt/__init__.py")
version = match.group(1)
setuptools.setup(
name="hyperopt",
version=version,
packages=setuptools.find_packages(include=["hyperopt*"]),
entry_points={"console_scripts": ["hyperopt-mongo-worker=hyperopt.mongoexp:main"]},
url="https://hyperopt.github.io/hyperopt",
project_urls={
"Source": "https://github.com/hyperopt/hyperopt",
},
author="James Bergstra",
author_email="[email protected]",
description="Distributed Asynchronous Hyperparameter Optimization",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
platforms=["Linux", "OS-X", "Windows"],
license="BSD",
keywords="Bayesian optimization hyperparameter model selection",
include_package_data=True,
    python_requires=">=3.7",
install_requires=[
"numpy>=1.17",
"scipy",
"networkx>=2.2",
"future",
"tqdm",
"cloudpickle",
],
extras_require={
"SparkTrials": ["pyspark", "py4j"],
"MongoTrials": "pymongo>=4.0.0",
"ATPE": ["lightgbm", "scikit-learn"],
"dev": ["black", "pre-commit", "nose", "pytest"],
},
tests_require=["nose", "pytest"],
zip_safe=False,
)
| 2,132 | 32.857143 | 87 |
py
|
hyperopt
|
hyperopt-master/docs/autogen.py
|
# This file has been taken from Keras' `docs` module found here:
# https://github.com/keras-team/keras/blob/master/docs/autogen.py
#
import re
import inspect
import os
import shutil
EXCLUDE = {}
PAGES = [
# {
# 'page': 'target.md',
# 'classes': [
# ],
# 'functions': [
# ],
# },
# {
# 'page': 'other_target.md',
# 'all_module_functions': [],
# },
]
ROOT = "http://hyperopt.github.io/hyperopt"
def get_function_signature(function, method=True):
wrapped = getattr(function, "_original_function", None)
if wrapped is None:
signature = inspect.getargspec(function)
else:
signature = inspect.getargspec(wrapped)
defaults = signature.defaults
if method:
args = signature.args[1:]
else:
args = signature.args
if defaults:
kwargs = zip(args[-len(defaults) :], defaults)
args = args[: -len(defaults)]
else:
kwargs = []
signature = [f"{clean_module_name(function.__module__)}.{function.__name__}("]
for arg in args:
signature.append(str(arg))
for key, value in kwargs:
if isinstance(value, str):
value = f"'{value}'"
signature.append(f"{key}={value}")
return ", ".join(signature) + ")"
def get_class_signature(cls):
try:
class_signature = get_function_signature(cls.__init__)
class_signature = class_signature.replace("__init__", cls.__name__)
except (TypeError, AttributeError):
# in case the class inherits from object and does not
# define __init__
class_signature = "{clean_module_name}.{cls_name}()".format(
clean_module_name=clean_module_name(cls.__module__), cls_name=cls.__name__
)
return class_signature
def clean_module_name(name):
    assert name[:9] == "hyperopt.", "Invalid module name: %s" % name
return name
def class_to_docs_link(cls):
module_name = clean_module_name(cls.__module__)
    module_name = module_name[8:]
link = ROOT + module_name.replace(".", "/") + "#" + cls.__name__.lower()
return link
def class_to_source_link(cls):
module_name = clean_module_name(cls.__module__)
path = module_name.replace(".", "/")
path += ".py"
line = inspect.getsourcelines(cls)[-1]
link = "https://github.com/hyperopt/" "hyperopt/blob/master/" + path + "#L" + str(
line
)
return "[[source]](" + link + ")"
def code_snippet(snippet):
result = "```python\n"
result += snippet + "\n"
result += "```\n"
return result
def count_leading_spaces(s):
ws = re.search(r"\S", s)
if ws:
return ws.start()
else:
return 0
def process_list_block(docstring, starting_point, leading_spaces, marker):
ending_point = docstring.find("\n\n", starting_point)
block = docstring[
starting_point : (None if ending_point == -1 else ending_point - 1)
]
# Place marker for later reinjection.
docstring = docstring.replace(block, marker)
lines = block.split("\n")
# Remove the computed number of leading white spaces from each line.
lines = [re.sub("^" + " " * leading_spaces, "", line) for line in lines]
# Usually lines have at least 4 additional leading spaces.
# These have to be removed, but first the list roots have to be detected.
top_level_regex = r"^ ([^\s\\\(]+):(.*)"
top_level_replacement = r"- __\1__:\2"
lines = [re.sub(top_level_regex, top_level_replacement, line) for line in lines]
# All the other lines get simply the 4 leading space (if present) removed
lines = [re.sub(r"^ ", "", line) for line in lines]
# Fix text lines after lists
indent = 0
text_block = False
for i in range(len(lines)):
line = lines[i]
spaces = re.search(r"\S", line)
if spaces:
# If it is a list element
if line[spaces.start()] == "-":
indent = spaces.start() + 1
if text_block:
text_block = False
lines[i] = "\n" + line
elif spaces.start() < indent:
text_block = True
indent = spaces.start()
lines[i] = "\n" + line
else:
text_block = False
indent = 0
block = "\n".join(lines)
return docstring, block
def process_docstring(docstring):
# First, extract code blocks and process them.
code_blocks = []
if "```" in docstring:
tmp = docstring[:]
while "```" in tmp:
tmp = tmp[tmp.find("```") :]
index = tmp[3:].find("```") + 6
snippet = tmp[:index]
# Place marker in docstring for later reinjection.
docstring = docstring.replace(snippet, "$CODE_BLOCK_%d" % len(code_blocks))
snippet_lines = snippet.split("\n")
# Remove leading spaces.
num_leading_spaces = snippet_lines[-1].find("`")
snippet_lines = [snippet_lines[0]] + [
line[num_leading_spaces:] for line in snippet_lines[1:]
]
# Most code snippets have 3 or 4 more leading spaces
# on inner lines, but not all. Remove them.
inner_lines = snippet_lines[1:-1]
leading_spaces = None
for line in inner_lines:
if not line or line[0] == "\n":
continue
spaces = count_leading_spaces(line)
if leading_spaces is None:
leading_spaces = spaces
if spaces < leading_spaces:
leading_spaces = spaces
if leading_spaces:
snippet_lines = (
[snippet_lines[0]]
+ [line[leading_spaces:] for line in snippet_lines[1:-1]]
+ [snippet_lines[-1]]
)
snippet = "\n".join(snippet_lines)
code_blocks.append(snippet)
tmp = tmp[index:]
# Format docstring lists.
section_regex = r"\n( +)# (.*)\n"
section_idx = re.search(section_regex, docstring)
shift = 0
sections = {}
while section_idx and section_idx.group(2):
anchor = section_idx.group(2)
leading_spaces = len(section_idx.group(1))
shift += section_idx.end()
marker = "$" + anchor.replace(" ", "_") + "$"
docstring, content = process_list_block(
docstring, shift, leading_spaces, marker
)
sections[marker] = content
section_idx = re.search(section_regex, docstring[shift:])
# Format docstring section titles.
docstring = re.sub(r"\n(\s+)# (.*)\n", r"\n\1__\2__\n\n", docstring)
# Strip all remaining leading spaces.
lines = docstring.split("\n")
docstring = "\n".join([line.lstrip(" ") for line in lines])
# Reinject list blocks.
for marker, content in sections.items():
docstring = docstring.replace(marker, content)
# Reinject code blocks.
for i, code_block in enumerate(code_blocks):
docstring = docstring.replace("$CODE_BLOCK_%d" % i, code_block)
return docstring
print("Cleaning up existing sources directory.")
if os.path.exists("sources"):
shutil.rmtree("sources")
print("Populating sources directory with templates.")
for subdir, dirs, fnames in os.walk("templates"):
for fname in fnames:
new_subdir = subdir.replace("templates", "sources")
if not os.path.exists(new_subdir):
os.makedirs(new_subdir)
if fname[-3:] == ".md":
fpath = os.path.join(subdir, fname)
new_fpath = fpath.replace("templates", "sources")
shutil.copy(fpath, new_fpath)
def read_file(path):
with open(path) as f:
return f.read()
def collect_class_methods(cls, methods):
if isinstance(methods, (list, tuple)):
return [getattr(cls, m) if isinstance(m, str) else m for m in methods]
methods = []
for _, method in inspect.getmembers(cls, predicate=inspect.isroutine):
if method.__name__[0] == "_" or method.__name__ in EXCLUDE:
continue
methods.append(method)
return methods
def render_function(function, method=True):
subblocks = []
signature = get_function_signature(function, method=method)
if method:
signature = signature.replace(clean_module_name(function.__module__) + ".", "")
subblocks.append("### " + function.__name__ + "\n")
subblocks.append(code_snippet(signature))
docstring = function.__doc__
if docstring:
subblocks.append(process_docstring(docstring))
return "\n\n".join(subblocks)
def read_page_data(page_data, type):
assert type in ["classes", "functions", "methods"]
data = page_data.get(type, [])
for module in page_data.get(f"all_module_{type}", []):
module_data = []
for name in dir(module):
if name[0] == "_" or name in EXCLUDE:
continue
module_member = getattr(module, name)
if (
inspect.isclass(module_member)
and type == "classes"
or inspect.isfunction(module_member)
and type == "functions"
):
instance = module_member
if module.__name__ in instance.__module__:
if instance not in module_data:
module_data.append(instance)
module_data.sort(key=lambda x: id(x))
data += module_data
return data
if __name__ == "__main__":
readme = read_file("../README.md")
index = read_file("templates/index.md")
index = index.replace("{{autogenerated}}", readme[readme.find("##") :])
with open("sources/index.md", "w") as f:
f.write(index)
print("Generating Hyperopt docs")
for page_data in PAGES:
classes = read_page_data(page_data, "classes")
blocks = []
for element in classes:
if not isinstance(element, (list, tuple)):
element = (element, [])
cls = element[0]
subblocks = []
signature = get_class_signature(cls)
subblocks.append(
'<span style="float:right;">' + class_to_source_link(cls) + "</span>"
)
if element[1]:
subblocks.append("## " + cls.__name__ + " class\n")
else:
subblocks.append("### " + cls.__name__ + "\n")
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_docstring(docstring))
methods = collect_class_methods(cls, element[1])
if methods:
subblocks.append("\n---")
subblocks.append("## " + cls.__name__ + " methods\n")
subblocks.append(
"\n---\n".join(
[render_function(method, method=True) for method in methods]
)
)
blocks.append("\n".join(subblocks))
methods = read_page_data(page_data, "methods")
for method in methods:
blocks.append(render_function(method, method=True))
functions = read_page_data(page_data, "functions")
for function in functions:
blocks.append(render_function(function, method=False))
if not blocks:
raise RuntimeError("Found no content for page " + page_data["page"])
mkdown = "\n----\n\n".join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data["page"]
path = os.path.join("sources", page_name)
if os.path.exists(path):
template = read_file(path)
assert "{{autogenerated}}" in template, (
"Template found for " + path + " but missing {{autogenerated}}" " tag."
)
mkdown = template.replace("{{autogenerated}}", mkdown)
print("...inserting autogenerated content into template:", path)
else:
print("...creating new page with autogenerated content:", path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(path, "w") as f:
f.write(mkdown)
| 12,407 | 32.994521 | 87 |
py
|
hyperopt
|
hyperopt-master/docs/__init__.py
| 0 | 0 | 0 |
py
|
|
hyperopt
|
hyperopt-master/hyperopt/main.py
|
#!/usr/bin/env python
"""
Entry point for bin/* scripts
"""
from future import standard_library
import logging
import os
from . import utils
from .base import SerialExperiment
import sys
standard_library.install_aliases()
logger = logging.getLogger(__name__)
try:
import cloudpickle as pickler
except Exception as e:
logger.info(
'Failed to load cloudpickle, try installing cloudpickle via "pip install '
'cloudpickle" for enhanced pickling support.'
)
import pickle as pickler
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
def main_search():
from optparse import OptionParser
parser = OptionParser(usage="%prog [options] [<bandit> <bandit_algo>]")
parser.add_option(
"--load",
default="",
dest="load",
metavar="FILE",
help="unpickle experiment from here on startup",
)
parser.add_option(
"--save",
default="experiment.pkl",
dest="save",
metavar="FILE",
help="pickle experiment to here on exit",
)
parser.add_option(
"--steps",
dest="steps",
default="100",
metavar="N",
help="exit after queuing this many jobs (default: 100)",
)
parser.add_option(
"--workdir",
dest="workdir",
default=os.path.expanduser("~/.hyperopt.workdir"),
help="create workdirs here",
metavar="DIR",
)
parser.add_option(
"--bandit-argfile",
dest="bandit_argfile",
default=None,
help="path to file containing arguments bandit constructor \
file format: pickle of dictionary containing two keys,\
{'args' : tuple of positional arguments, \
'kwargs' : dictionary of keyword arguments}",
)
parser.add_option(
"--bandit-algo-argfile",
dest="bandit_algo_argfile",
default=None,
help="path to file containing arguments for bandit_algo "
"constructor. File format is pickled dictionary containing "
"two keys: 'args', a tuple of positional arguments, and "
"'kwargs', a dictionary of keyword arguments. "
"NOTE: bandit is pre-pended as first element of arg tuple.",
)
(options, args) = parser.parse_args()
try:
bandit_json, bandit_algo_json = args
except:
parser.print_help()
return -1
try:
if not options.load:
raise OSError()
handle = open(options.load, "rb")
self = pickler.load(handle)
handle.close()
except OSError:
bandit = utils.get_obj(bandit_json, argfile=options.bandit_argfile)
bandit_algo = utils.get_obj(
bandit_algo_json, argfile=options.bandit_algo_argfile, args=(bandit,)
)
self = SerialExperiment(bandit_algo)
try:
self.run(int(options.steps))
finally:
if options.save:
pickler.dump(self, open(options.save, "wb"))
def main(cmd, fn_pos=1):
"""
Entry point for bin/* scripts
XXX
"""
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
try:
runner = dict(
search="main_search", dryrun="main_dryrun", plot_history="main_plot_history"
)[cmd]
except KeyError:
logger.error("Command not recognized: %s" % cmd)
# XXX: Usage message
sys.exit(1)
try:
# TODO: argv1 never used
argv1 = sys.argv[fn_pos]
except IndexError:
logger.error("Module name required (XXX: print Usage)")
return 1
fn = utils.load_tokens(sys.argv[fn_pos].split(".") + [runner])
sys.exit(fn(sys.argv[fn_pos + 1 :]))
if __name__ == "__main__":
cmd = sys.argv[1]
sys.exit(main(cmd, 2))
| 3,818 | 26.875912 | 88 |
py
|
hyperopt
|
hyperopt-master/hyperopt/base.py
|
"""Base classes / Design
The design is that there are three components fitting together in this project:
- Trials - a list of documents including at least sub-documents:
['spec'] - the specification of hyper-parameters for a job
['result'] - the result of Domain.evaluate(). Typically includes:
['status'] - one of the STATUS_STRINGS
['loss'] - real-valued scalar that hyperopt is trying to minimize
['idxs'] - compressed representation of spec
['vals'] - compressed representation of spec
['tid'] - trial id (unique in Trials list)
- Domain - specifies a search problem
- Ctrl - a channel for two-way communication
between an Experiment and Domain.evaluate.
Experiment subclasses may subclass Ctrl to match. For example, if an
experiment is going to dispatch jobs in other threads, then an
appropriate thread-aware Ctrl subclass should go with it.
"""
import numbers
from past.builtins import basestring
import logging
import datetime
import sys
import numpy as np
try:
import bson # -- comes with pymongo
from bson.objectid import ObjectId
have_bson = True
except ImportError:
have_bson = False
from . import pyll
from .pyll.stochastic import recursive_set_rng_kwarg
from .exceptions import (
DuplicateLabel,
InvalidTrial,
InvalidResultStatus,
InvalidLoss,
AllTrialsFailed,
)
from .utils import pmin_sampled
from .utils import use_obj_for_literal_in_memo
from .vectorize import VectorizeHelper
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
logger = logging.getLogger(__name__)
# -- STATUS values
# An eval_fn returning a dictionary must have a status key with
# one of these values. They are used by optimization routines
# and plotting functions.
STATUS_NEW = "new"
STATUS_RUNNING = "running"
STATUS_SUSPENDED = "suspended"
STATUS_OK = "ok"
STATUS_FAIL = "fail"
STATUS_STRINGS = (
"new", # computations have not started
"running", # computations are in prog
"suspended", # computations have been suspended, job is not finished
"ok", # computations are finished, terminated normally
"fail",
) # computations are finished, terminated with error
# - result['status_fail'] should contain more info
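# -- e.g. a minimal successful result dictionary returned by an objective
#    function looks like {'loss': 0.5, 'status': STATUS_OK} (illustrative).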
# -- JOBSTATE values
# These are used internally by the scheduler.
# These values are used to communicate between an Experiment
# and a worker process. Consider moving them to mongoexp.
# -- named constants for job execution pipeline
JOB_STATE_NEW = 0
JOB_STATE_RUNNING = 1
JOB_STATE_DONE = 2
JOB_STATE_ERROR = 3
JOB_STATE_CANCEL = 4
JOB_STATES = [
JOB_STATE_NEW,
JOB_STATE_RUNNING,
JOB_STATE_DONE,
JOB_STATE_ERROR,
JOB_STATE_CANCEL,
]
JOB_VALID_STATES = {JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE}
TRIAL_KEYS = [
"tid",
"spec",
"result",
"misc",
"state",
"owner",
"book_time",
"refresh_time",
"exp_key",
]
TRIAL_MISC_KEYS = ["tid", "cmd", "idxs", "vals"]
def _all_same(*args):
return 1 == len(set(args))
def SONify(arg, memo=None):
if not have_bson:
return arg
add_arg_to_raise = True
try:
if memo is None:
memo = {}
if id(arg) in memo:
rval = memo[id(arg)]
if isinstance(arg, ObjectId):
rval = arg
elif isinstance(arg, datetime.datetime):
rval = arg
elif isinstance(arg, np.floating):
rval = float(arg)
elif isinstance(arg, np.integer):
rval = int(arg)
elif isinstance(arg, (list, tuple)):
rval = type(arg)([SONify(ai, memo) for ai in arg])
elif isinstance(arg, dict):
rval = {SONify(k, memo): SONify(v, memo) for k, v in list(arg.items())}
elif isinstance(arg, (basestring, float, int, type(None))):
rval = arg
elif isinstance(arg, np.ndarray):
if arg.ndim == 0:
rval = SONify(arg.sum())
else:
rval = list(map(SONify, arg)) # N.B. memo None
# -- put this after ndarray because ndarray not hashable
elif isinstance(arg, bool):
rval = int(arg)
else:
add_arg_to_raise = False
raise TypeError("SONify", arg)
except Exception as e:
if add_arg_to_raise:
e.args = e.args + (arg,)
raise
memo[id(rval)] = rval
return rval
def miscs_update_idxs_vals(miscs, idxs, vals, assert_all_vals_used=True, idxs_map=None):
"""
Unpack the idxs-vals format into the list of dictionaries that is `misc`.
idxs_map: a dictionary of id->id mappings so that the misc['idxs'] can
contain different numbers than the idxs argument. XXX CLARIFY
"""
if idxs_map is None:
idxs_map = {}
assert set(idxs.keys()) == set(vals.keys())
misc_by_id = {m["tid"]: m for m in miscs}
for m in miscs:
m["idxs"] = {key: [] for key in idxs}
m["vals"] = {key: [] for key in idxs}
for key in idxs:
assert len(idxs[key]) == len(vals[key])
for tid, val in zip(idxs[key], vals[key]):
tid = idxs_map.get(tid, tid)
if assert_all_vals_used or tid in misc_by_id:
misc_by_id[tid]["idxs"][key] = [tid]
misc_by_id[tid]["vals"][key] = [val]
return miscs
def miscs_to_idxs_vals(miscs, keys=None):
if keys is None:
if len(miscs) == 0:
raise ValueError("cannot infer keys from empty miscs")
keys = list(miscs[0]["idxs"].keys())
idxs, vals = {k: [] for k in keys}, {k: [] for k in keys}
for misc in miscs:
for node_id in idxs:
t_idxs = misc["idxs"][node_id]
t_vals = misc["vals"][node_id]
assert len(t_idxs) == len(t_vals)
assert t_idxs == [] or t_idxs == [misc["tid"]]
idxs[node_id].extend(t_idxs)
vals[node_id].extend(t_vals)
return idxs, vals
def spec_from_misc(misc):
spec = {}
for k, v in list(misc["vals"].items()):
if len(v) == 0:
pass
elif len(v) == 1:
spec[k] = v[0]
else:
raise NotImplementedError("multiple values", (k, v))
return spec
def validate_timeout(timeout):
if timeout is not None and (
not isinstance(timeout, numbers.Number)
or timeout <= 0
or isinstance(timeout, bool)
):
raise Exception(
"The timeout argument should be None or a positive value. "
f"Given value: {timeout}"
)
def validate_loss_threshold(loss_threshold):
if loss_threshold is not None and (
not isinstance(loss_threshold, numbers.Number)
or isinstance(loss_threshold, bool)
):
raise Exception(
"The loss_threshold argument should be None or a numeric value. "
f"Given value: {loss_threshold}"
)
class Trials:
"""Database interface supporting data-driven model-based optimization.
The model-based optimization algorithms used by hyperopt's fmin function
work by analyzing samples of a response surface--a history of what points
in the search space were tested, and what was discovered by those tests.
A Trials instance stores that history and makes it available to fmin and
to the various optimization algorithms.
This class (`base.Trials`) is a pure-Python implementation of the database
in terms of lists of dictionaries. Subclass `mongoexp.MongoTrials`
implements the same API in terms of a mongodb database running in another
process. Other subclasses may be implemented in future.
The elements of `self.trials` represent all of the completed, in-progress,
and scheduled evaluation points from an e.g. `fmin` call.
Each element of `self.trials` is a dictionary with *at least* the following
keys:
* **tid**: a unique trial identification object within this Trials instance
usually it is an integer, but it isn't obvious that other sortable,
hashable objects couldn't be used at some point.
* **result**: a sub-dictionary representing what was returned by the fmin
evaluation function. This sub-dictionary has a key 'status' with a value
      from `STATUS_STRINGS`; if the status is `STATUS_OK`, then there should be
a 'loss' key as well with a floating-point value. Other special keys in
this sub-dictionary may be used by optimization algorithms (see them
for details). Other keys in this sub-dictionary can be used by the
evaluation function to store miscellaneous diagnostics and debugging
information.
* **misc**: despite generic name, this is currently where the trial's
hyperparameter assignments are stored. This sub-dictionary has two
elements: `'idxs'` and `'vals'`. The `vals` dictionary is
a sub-sub-dictionary mapping each hyperparameter to either `[]` (if the
hyperparameter is inactive in this trial), or `[<val>]` (if the
hyperparameter is active). The `idxs` dictionary is technically
redundant -- it is the same as `vals` but it maps hyperparameter names
to either `[]` or `[<tid>]`.
"""
asynchronous = False
def __init__(self, exp_key=None, refresh=True):
self._ids = set()
self._dynamic_trials = []
self._exp_key = exp_key
self.attachments = {}
if refresh:
self.refresh()
def view(self, exp_key=None, refresh=True):
rval = object.__new__(self.__class__)
rval._exp_key = exp_key
rval._ids = self._ids
rval._dynamic_trials = self._dynamic_trials
rval.attachments = self.attachments
if refresh:
rval.refresh()
return rval
def aname(self, trial, name):
return "ATTACH::{}::{}".format(trial["tid"], name)
def trial_attachments(self, trial):
"""
Support syntax for load: self.trial_attachments(doc)[name]
# -- does this work syntactically?
# (In any event a 2-stage store will work)
Support syntax for store: self.trial_attachments(doc)[name] = value
"""
# don't offer more here than in MongoCtrl
class Attachments:
def __contains__(_self, name):
return self.aname(trial, name) in self.attachments
def __getitem__(_self, name):
return self.attachments[self.aname(trial, name)]
def __setitem__(_self, name, value):
self.attachments[self.aname(trial, name)] = value
def __delitem__(_self, name):
del self.attachments[self.aname(trial, name)]
return Attachments()
def __iter__(self):
try:
return iter(self._trials)
except AttributeError:
print("You have to refresh before you iterate", file=sys.stderr)
raise
def __len__(self):
try:
return len(self._trials)
except AttributeError:
print("You have to refresh before you compute len", file=sys.stderr)
raise
def __getitem__(self, item):
# -- how to make it obvious whether indexing is by _trials position
# or by tid if both are integers?
raise NotImplementedError("")
def refresh(self):
# In MongoTrials, this method fetches from database
if self._exp_key is None:
self._trials = [
tt for tt in self._dynamic_trials if tt["state"] in JOB_VALID_STATES
]
else:
self._trials = [
tt
for tt in self._dynamic_trials
if (tt["state"] in JOB_VALID_STATES and tt["exp_key"] == self._exp_key)
]
self._ids.update([tt["tid"] for tt in self._trials])
@property
def trials(self):
return self._trials
@property
def tids(self):
return [tt["tid"] for tt in self._trials]
@property
def specs(self):
return [tt["spec"] for tt in self._trials]
@property
def results(self):
return [tt["result"] for tt in self._trials]
@property
def miscs(self):
return [tt["misc"] for tt in self._trials]
@property
def idxs_vals(self):
return miscs_to_idxs_vals(self.miscs)
@property
def idxs(self):
return self.idxs_vals[0]
@property
def vals(self):
return self.idxs_vals[1]
def assert_valid_trial(self, trial):
if not (hasattr(trial, "keys") and hasattr(trial, "values")):
raise InvalidTrial("trial should be dict-like", trial)
for key in TRIAL_KEYS:
if key not in trial:
raise InvalidTrial("trial missing key %s", key)
for key in TRIAL_MISC_KEYS:
if key not in trial["misc"]:
raise InvalidTrial('trial["misc"] missing key', key)
if trial["tid"] != trial["misc"]["tid"]:
raise InvalidTrial("tid mismatch between root and misc", trial)
# -- check for SON-encodable
if have_bson:
try:
bson.BSON.encode(trial)
except:
# TODO: save the trial object somewhere to inspect, fix, re-insert
# so that precious data is not simply deallocated and lost.
print("-" * 80)
print("CAN'T ENCODE")
print("-" * 80)
raise
if trial["exp_key"] != self._exp_key:
raise InvalidTrial("wrong exp_key", (trial["exp_key"], self._exp_key))
# XXX how to assert that tids are unique?
return trial
def _insert_trial_docs(self, docs):
"""insert with no error checking"""
rval = [doc["tid"] for doc in docs]
self._dynamic_trials.extend(docs)
return rval
def insert_trial_doc(self, doc):
"""insert trial after error checking
Does not refresh. Call self.refresh() for the trial to appear in
self.specs, self.results, etc.
"""
doc = self.assert_valid_trial(SONify(doc))
return self._insert_trial_docs([doc])[0]
# refreshing could be done fast in this base implementation, but with
# a real DB the steps should be separated.
def insert_trial_docs(self, docs):
"""trials - something like is returned by self.new_trial_docs()"""
docs = [self.assert_valid_trial(SONify(doc)) for doc in docs]
return self._insert_trial_docs(docs)
def new_trial_ids(self, n):
aa = len(self._ids)
rval = list(range(aa, aa + n))
self._ids.update(rval)
return rval
def new_trial_docs(self, tids, specs, results, miscs):
assert len(tids) == len(specs) == len(results) == len(miscs)
trials_docs = []
for tid, spec, result, misc in zip(tids, specs, results, miscs):
doc = {
"state": JOB_STATE_NEW,
"tid": tid,
"spec": spec,
"result": result,
"misc": misc,
"exp_key": self._exp_key,
"owner": None,
"version": 0,
"book_time": None,
"refresh_time": None,
}
trials_docs.append(doc)
return trials_docs
def source_trial_docs(self, tids, specs, results, miscs, sources):
assert len(tids) == len(specs) == len(results) == len(miscs) == len(sources)
rval = []
for tid, spec, result, misc, source in zip(
tids, specs, results, miscs, sources
):
doc = dict(
version=0,
tid=tid,
spec=spec,
result=result,
misc=misc,
state=source["state"],
exp_key=source["exp_key"],
owner=source["owner"],
book_time=source["book_time"],
refresh_time=source["refresh_time"],
)
# -- ensure that misc has the following fields,
            # some of which may already be set correctly.
assign = ("tid", tid), ("cmd", None), ("from_tid", source["tid"])
for k, v in assign:
assert doc["misc"].setdefault(k, v) == v
rval.append(doc)
return rval
def delete_all(self):
self._dynamic_trials = []
self.attachments = {}
self.refresh()
def count_by_state_synced(self, arg, trials=None):
"""
Return trial counts by looking at self._trials
"""
if trials is None:
trials = self._trials
if arg in JOB_STATES:
queue = [doc for doc in trials if doc["state"] == arg]
elif hasattr(arg, "__iter__"):
states = set(arg)
assert all([x in JOB_STATES for x in states])
queue = [doc for doc in trials if doc["state"] in states]
else:
raise TypeError(arg)
rval = len(queue)
return rval
def count_by_state_unsynced(self, arg):
"""
Return trial counts that count_by_state_synced would return if we
called refresh() first.
"""
if self._exp_key is not None:
exp_trials = [
tt for tt in self._dynamic_trials if tt["exp_key"] == self._exp_key
]
else:
exp_trials = self._dynamic_trials
return self.count_by_state_synced(arg, trials=exp_trials)
def losses(self, bandit=None):
if bandit is None:
return [r.get("loss") for r in self.results]
return list(map(bandit.loss, self.results, self.specs))
def statuses(self, bandit=None):
if bandit is None:
return [r.get("status") for r in self.results]
return list(map(bandit.status, self.results, self.specs))
def average_best_error(self, bandit=None):
"""Return the average best error of the experiment
Average best error is defined as the average of bandit.true_loss,
weighted by the probability that the corresponding bandit.loss is best.
For domains with loss measurement variance of 0, this function simply
returns the true_loss corresponding to the result with the lowest loss.
"""
if bandit is None:
results = self.results
loss = [r["loss"] for r in results if r["status"] == STATUS_OK]
loss_v = [
r.get("loss_variance", 0) for r in results if r["status"] == STATUS_OK
]
true_loss = [
r.get("true_loss", r["loss"])
for r in results
if r["status"] == STATUS_OK
]
else:
def fmap(f):
rval = np.asarray(
[
f(r, s)
for (r, s) in zip(self.results, self.specs)
if bandit.status(r) == STATUS_OK
]
).astype("float")
if not np.all(np.isfinite(rval)):
raise ValueError()
return rval
loss = fmap(bandit.loss)
loss_v = fmap(bandit.loss_variance)
true_loss = fmap(bandit.true_loss)
loss3 = list(zip(loss, loss_v, true_loss))
if not loss3:
raise ValueError("Empty loss vector")
loss3.sort()
loss3 = np.asarray(loss3)
if np.all(loss3[:, 1] == 0):
best_idx = np.argmin(loss3[:, 0])
return loss3[best_idx, 2]
else:
cutoff = 0
sigma = np.sqrt(loss3[0][1])
while cutoff < len(loss3) and loss3[cutoff][0] < loss3[0][0] + 3 * sigma:
cutoff += 1
pmin = pmin_sampled(loss3[:cutoff, 0], loss3[:cutoff, 1])
avg_true_loss = (pmin * loss3[:cutoff, 2]).sum()
return avg_true_loss
@property
def best_trial(self):
"""
Trial with lowest non-NaN loss and status=STATUS_OK.
        If no such trial exists, raises AllTrialsFailed.
"""
candidates = [
t
for t in self.trials
if t["result"]["status"] == STATUS_OK and not np.isnan(t["result"]["loss"])
]
if not candidates:
raise AllTrialsFailed
losses = [float(t["result"]["loss"]) for t in candidates]
if len(losses) == 0:
return None
best = np.nanargmin(losses)
return candidates[best]
@property
def argmin(self):
best_trial = self.best_trial
vals = best_trial["misc"]["vals"]
# unpack the one-element lists to values
# and skip over the 0-element lists
rval = {}
for k, v in list(vals.items()):
if v:
rval[k] = v[0]
return rval
def fmin(
self,
fn,
space,
algo=None,
max_evals=None,
timeout=None,
loss_threshold=None,
max_queue_len=1,
rstate=None,
verbose=False,
pass_expr_memo_ctrl=None,
catch_eval_exceptions=False,
return_argmin=True,
show_progressbar=True,
early_stop_fn=None,
trials_save_file="",
):
"""Minimize a function over a hyperparameter space.
For most parameters, see `hyperopt.fmin.fmin`.
Parameters
----------
catch_eval_exceptions : bool, default False
If set to True, exceptions raised by either the evaluation of the
            configuration space from hyperparameters or the execution of `fn`
            will be caught by fmin, and recorded in self._dynamic_trials as
error jobs (JOB_STATE_ERROR). If set to False, such exceptions
will not be caught, and so they will propagate to calling code.
show_progressbar : bool or context manager, default True.
Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
"""
# -- Stop-gap implementation!
# fmin should have been a Trials method in the first place
# but for now it's still sitting in another file.
from .fmin import fmin
return fmin(
fn,
space,
algo=algo,
max_evals=max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
trials=self,
rstate=rstate,
verbose=verbose,
max_queue_len=max_queue_len,
allow_trials_fmin=False, # -- prevent recursion
pass_expr_memo_ctrl=pass_expr_memo_ctrl,
catch_eval_exceptions=catch_eval_exceptions,
return_argmin=return_argmin,
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file=trials_save_file,
)
def trials_from_docs(docs, validate=True, **kwargs):
"""Construct a Trials base class instance from a list of trials documents"""
rval = Trials(**kwargs)
if validate:
rval.insert_trial_docs(docs)
else:
rval._insert_trial_docs(docs)
rval.refresh()
return rval
class Ctrl:
"""Control object for interruptible, checkpoint-able evaluation"""
info = logger.info
warn = logger.warning
error = logger.error
debug = logger.debug
def __init__(self, trials, current_trial=None):
# -- attachments should be used like
# attachments[key]
# attachments[key] = value
# where key and value are strings. Client code should not
# expect any dictionary-like behaviour beyond that (no update)
if trials is None:
self.trials = Trials()
else:
self.trials = trials
self.current_trial = current_trial
def checkpoint(self, r=None):
assert self.current_trial in self.trials._trials
if r is not None:
self.current_trial["result"] = r
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
def inject_results(self, specs, results, miscs, new_tids=None):
"""Inject new results into self.trials
Returns ??? XXX
new_tids can be None, in which case new tids will be generated
automatically
"""
trial = self.current_trial
assert trial is not None
num_news = len(specs)
assert len(specs) == len(results) == len(miscs)
if new_tids is None:
new_tids = self.trials.new_trial_ids(num_news)
new_trials = self.trials.source_trial_docs(
tids=new_tids, specs=specs, results=results, miscs=miscs, sources=[trial]
)
for t in new_trials:
t["state"] = JOB_STATE_DONE
return self.trials.insert_trial_docs(new_trials)
class Domain:
"""Picklable representation of search space and evaluation function."""
rec_eval_print_node_on_error = False
# -- the Ctrl object is not used directly, but rather
# a live Ctrl instance is inserted for the pyll_ctrl
# in self.evaluate so that it can be accessed from within
# the pyll graph describing the search space.
pyll_ctrl = pyll.as_apply(Ctrl)
def __init__(
self,
fn,
expr,
workdir=None,
pass_expr_memo_ctrl=None,
name=None,
loss_target=None,
):
"""
Parameters
----------
fn : callable
This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`)
expr : hyperopt.pyll.Apply
This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`)
workdir : string (or None)
            If non-None, the current working directory will be `workdir` while
`expr` and `fn` are evaluated. (XXX Currently only respected by
jobs run via MongoWorker)
pass_expr_memo_ctrl : bool
If True, `fn` will be called like this:
`fn(self.expr, memo, ctrl)`,
where `memo` is a dictionary mapping `Apply` nodes to their
computed values, and `ctrl` is a `Ctrl` instance for communicating
with a Trials database. This lower-level calling convention is
useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself
in some customized way.
name : string (or None)
Label, used for pretty-printing.
loss_target : float (or None)
The actual or estimated minimum of `fn`.
Some optimization algorithms may behave differently if their first
objective is to find an input that achieves a certain value,
rather than the more open-ended objective of pure minimization.
XXX: Move this from Domain to be an fmin arg.
"""
self.fn = fn
if pass_expr_memo_ctrl is None:
self.pass_expr_memo_ctrl = getattr(fn, "fmin_pass_expr_memo_ctrl", False)
else:
self.pass_expr_memo_ctrl = pass_expr_memo_ctrl
self.expr = pyll.as_apply(expr)
self.params = {}
for node in pyll.dfs(self.expr):
if node.name == "hyperopt_param":
label = node.arg["label"].obj
if label in self.params:
raise DuplicateLabel(label)
self.params[label] = node.arg["obj"]
self.loss_target = loss_target
self.name = name
self.workdir = workdir
self.s_new_ids = pyll.Literal("new_ids") # -- list at eval-time
before = pyll.dfs(self.expr)
# -- raises exception if expr contains cycles
pyll.toposort(self.expr)
vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
# -- raises exception if v_expr contains cycles
pyll.toposort(vh.v_expr)
idxs_by_label = vh.idxs_by_label()
vals_by_label = vh.vals_by_label()
after = pyll.dfs(self.expr)
# -- try to detect if VectorizeHelper screwed up anything inplace
assert before == after
assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
assert set(idxs_by_label.keys()) == set(self.params.keys())
self.s_rng = pyll.Literal("rng-placeholder")
# -- N.B. operates inplace:
self.s_idxs_vals = recursive_set_rng_kwarg(
pyll.scope.pos_args(idxs_by_label, vals_by_label), self.s_rng
)
# -- raises an exception if no topological ordering exists
pyll.toposort(self.s_idxs_vals)
# -- Protocol for serialization.
# self.cmd indicates to e.g. MongoWorker how this domain
# should be [un]serialized.
# XXX This mechanism deserves review as support for ipython
# workers improves.
self.cmd = ("domain_attachment", "FMinIter_Domain")
def memo_from_config(self, config):
memo = {}
for node in pyll.dfs(self.expr):
if node.name == "hyperopt_param":
label = node.arg["label"].obj
                # -- hack because it's not really garbage-collected
# this does have the desired effect of crashing the
# function if rec_eval actually needs a value that
                # the optimization algorithm thought to be unnecessary
memo[node] = config.get(label, pyll.base.GarbageCollected)
return memo
def evaluate(self, config, ctrl, attach_attachments=True):
memo = self.memo_from_config(config)
use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
if self.pass_expr_memo_ctrl:
rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
else:
# -- the "work" of evaluating `config` can be written
# either into the pyll part (self.expr)
# or the normal Python part (self.fn)
pyll_rval = pyll.rec_eval(
self.expr,
memo=memo,
print_node_on_error=self.rec_eval_print_node_on_error,
)
rval = self.fn(pyll_rval)
if isinstance(rval, (float, int, np.number)):
dict_rval = {"loss": float(rval), "status": STATUS_OK}
else:
dict_rval = dict(rval)
status = dict_rval["status"]
if status not in STATUS_STRINGS:
raise InvalidResultStatus(dict_rval)
if status == STATUS_OK:
# -- make sure that the loss is present and valid
try:
dict_rval["loss"] = float(dict_rval["loss"])
except (TypeError, KeyError):
raise InvalidLoss(dict_rval)
if attach_attachments:
attachments = dict_rval.pop("attachments", {})
for key, val in list(attachments.items()):
ctrl.attachments[key] = val
# -- don't do this here because SON-compatibility is only a requirement
# for trials destined for a mongodb. In-memory rvals can contain
# anything.
return dict_rval
def evaluate_async(self, config, ctrl, attach_attachments=True):
"""
this is the first part of async evaluation for ipython parallel engines (see ipy.py)
This breaks evaluate into two parts to allow for the apply_async call
to only pass the objective function and arguments.
"""
memo = self.memo_from_config(config)
use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
if self.pass_expr_memo_ctrl:
pyll_rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
else:
# -- the "work" of evaluating `config` can be written
# either into the pyll part (self.expr)
# or the normal Python part (self.fn)
pyll_rval = pyll.rec_eval(
self.expr,
memo=memo,
print_node_on_error=self.rec_eval_print_node_on_error,
)
return (self.fn, pyll_rval)
def evaluate_async2(self, rval, ctrl, attach_attachments=True):
"""
this is the second part of async evaluation for ipython parallel engines (see ipy.py)
"""
if isinstance(rval, (float, int, np.number)):
dict_rval = {"loss": float(rval), "status": STATUS_OK}
else:
dict_rval = dict(rval)
status = dict_rval["status"]
if status not in STATUS_STRINGS:
raise InvalidResultStatus(dict_rval)
if status == STATUS_OK:
# -- make sure that the loss is present and valid
try:
dict_rval["loss"] = float(dict_rval["loss"])
except (TypeError, KeyError):
raise InvalidLoss(dict_rval)
if attach_attachments:
attachments = dict_rval.pop("attachments", {})
for key, val in list(attachments.items()):
ctrl.attachments[key] = val
# -- don't do this here because SON-compatibility is only a requirement
# for trials destined for a mongodb. In-memory rvals can contain
# anything.
return dict_rval
def short_str(self):
return "Domain{%s}" % str(self.fn)
def loss(self, result, config=None):
"""Extract the scalar-valued loss from a result document"""
return result.get("loss", None)
def loss_variance(self, result, config=None):
"""Return the variance in the estimate of the loss"""
return result.get("loss_variance", 0.0)
def true_loss(self, result, config=None):
"""Return a true loss, in the case that the `loss` is a surrogate"""
        # N.B. don't use get() here, it evaluates self.loss unnecessarily
try:
return result["true_loss"]
except KeyError:
return self.loss(result, config=config)
def true_loss_variance(self, config=None):
"""Return the variance in true loss,
in the case that the `loss` is a surrogate.
"""
raise NotImplementedError()
def status(self, result, config=None):
"""Extract the job status from a result document"""
return result["status"]
def new_result(self):
"""Return a JSON-encodable object
to serve as the 'result' for new jobs.
"""
return {"status": STATUS_NEW}
# -- flake8 doesn't like blank last line
| 34,793 | 33.552135 | 93 |
py
|
hyperopt
|
hyperopt-master/hyperopt/tpe.py
|
"""
Graphical model (GM)-based optimization algorithm using Theano
"""
from past.utils import old_div
import logging
import time
import numpy as np
from scipy.special import erf
from . import pyll
from .pyll import scope
from .pyll.stochastic import implicit_stochastic
from .base import miscs_to_idxs_vals
from .base import miscs_update_idxs_vals
# from .base import Trials
from . import rand
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
logger = logging.getLogger(__name__)
EPS = 1e-12
# -- default linear forgetting. don't try to change by writing this variable
# because it's captured in function default args when this file is read
DEFAULT_LF = 25
adaptive_parzen_samplers = {}
# a decorator to register functions to the dict `adaptive_parzen_samplers`
def adaptive_parzen_sampler(name):
def wrapper(f):
assert name not in adaptive_parzen_samplers
adaptive_parzen_samplers[name] = f
return f
return wrapper
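# -- A minimal sketch (illustrative only; this helper is not part of
# hyperopt): once the decorated samplers below are defined, build_posterior()
# can look up the right sampler by the name of a prior-distribution node.
def _example_lookup_sampler(dist_name="uniform"):
    # returns e.g. ap_uniform_sampler for dist_name == "uniform"
    return adaptive_parzen_samplers[dist_name]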
#
# These are some custom distributions
# that are used to represent posterior distributions.
#
# -- Categorical
@scope.define
def categorical_lpdf(sample, p):
if sample.size:
return np.log(np.asarray(p)[sample])
return np.asarray([])
@scope.define
def randint_via_categorical_lpdf(sample, p):
if sample.size:
return np.log(np.asarray(p)[sample])
return np.asarray([])
# -- Bounded Gaussian Mixture Model (BGMM)
@implicit_stochastic
@scope.define
def GMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):
"""Sample from truncated 1-D Gaussian Mixture Model"""
weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))
assert len(weights) == len(mus) == len(sigmas)
n_samples = int(np.prod(size))
# n_components = len(weights)
if low is None and high is None:
# -- draw from a standard GMM
active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
samples = rng.normal(loc=mus[active], scale=sigmas[active])
else:
# -- draw from truncated components, handling one-sided truncation
low = float(low) if low is not None else -float("Inf")
high = float(high) if high is not None else float("Inf")
if low >= high:
raise ValueError("low >= high", (low, high))
samples = []
while len(samples) < n_samples:
active = np.argmax(rng.multinomial(1, weights))
draw = rng.normal(loc=mus[active], scale=sigmas[active])
if low <= draw < high:
samples.append(draw)
samples = np.reshape(np.asarray(samples), size)
if q is None:
return samples
return np.round(old_div(samples, q)) * q
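# -- A numpy-only sketch (illustrative only, not used by hyperopt) of the
# accept/reject loop that GMM1 runs when `low`/`high` bounds are given.
def _example_truncated_gmm_draws(n=5, seed=0):
    rng = np.random.default_rng(seed)
    weights, mus, sigmas = [0.3, 0.7], [0.2, 0.8], [0.1, 0.1]
    low, high = 0.0, 1.0
    draws = []
    while len(draws) < n:
        k = np.argmax(rng.multinomial(1, weights))
        x = rng.normal(loc=mus[k], scale=sigmas[k])
        if low <= x < high:  # reject draws that fall outside the bounds
            draws.append(x)
    return np.asarray(draws)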
@scope.define
def normal_cdf(x, mu, sigma):
top = x - mu
bottom = np.maximum(np.sqrt(2) * sigma, EPS)
z = old_div(top, bottom)
return 0.5 * (1 + erf(z))
@scope.define
def GMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
def print_verbose(s, x):
return print(f"GMM1_lpdf:{s}", x)
verbose = 0
samples, weights, mus, sigmas = list(
map(np.asarray, (samples, weights, mus, sigmas))
)
if samples.size == 0:
return np.asarray([])
if weights.ndim != 1:
raise TypeError("need vector of weights", weights.shape)
if mus.ndim != 1:
raise TypeError("need vector of mus", mus.shape)
if sigmas.ndim != 1:
raise TypeError("need vector of sigmas", sigmas.shape)
assert len(weights) == len(mus) == len(sigmas)
_samples = samples
samples = _samples.flatten()
if verbose:
print_verbose("samples", set(samples))
print_verbose("weights", weights)
print_verbose("mus", mus)
print_verbose("sigmas", sigmas)
print_verbose("low", low)
print_verbose("high", high)
print_verbose("q", q)
if low is None and high is None:
p_accept = 1
else:
p_accept = np.sum(
weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))
)
if q is None:
dist = samples[:, None] - mus
mahal = (old_div(dist, np.maximum(sigmas, EPS))) ** 2
# mahal shape is (n_samples, n_components)
Z = np.sqrt(2 * np.pi * sigmas**2)
coef = weights / Z / p_accept
rval = logsum_rows(-0.5 * mahal + np.log(coef))
else:
prob = np.zeros(samples.shape, dtype="float64")
for w, mu, sigma in zip(weights, mus, sigmas):
if high is None:
ubound = samples + old_div(q, 2.0)
else:
ubound = np.minimum(samples + old_div(q, 2.0), high)
if low is None:
lbound = samples - old_div(q, 2.0)
else:
lbound = np.maximum(samples - old_div(q, 2.0), low)
# -- two-stage addition is slightly more numerically accurate
inc_amt = w * normal_cdf(ubound, mu, sigma)
inc_amt -= w * normal_cdf(lbound, mu, sigma)
prob += inc_amt
rval = np.log(prob) - np.log(p_accept)
if verbose:
print_verbose("rval:", dict(list(zip(samples, rval))))
rval.shape = _samples.shape
return rval
# -- Mixture of Log-Normals
@scope.define
def lognormal_cdf(x, mu, sigma):
# wikipedia gives the cdf as
# 0.5 + 0.5 * erf((log(x) - mu) / (sqrt(2) * sigma))
#
# the maximum is used to move negative values and 0 up to a point
# where they do not cause nan or inf, but also don't contribute much
# to the cdf.
if len(x) == 0:
return np.asarray([])
if x.min() < 0:
raise ValueError("negative arg to lognormal_cdf", x)
olderr = np.seterr(divide="ignore")
try:
top = np.log(np.maximum(x, EPS)) - mu
bottom = np.maximum(np.sqrt(2) * sigma, EPS)
z = old_div(top, bottom)
return 0.5 + 0.5 * erf(z)
finally:
np.seterr(**olderr)
@scope.define
def lognormal_lpdf(x, mu, sigma):
# formula copied from wikipedia
# http://en.wikipedia.org/wiki/Log-normal_distribution
assert np.all(sigma >= 0)
sigma = np.maximum(sigma, EPS)
Z = sigma * x * np.sqrt(2 * np.pi)
E = 0.5 * (old_div((np.log(x) - mu), sigma)) ** 2
rval = -E - np.log(Z)
return rval
@scope.define
def qlognormal_lpdf(x, mu, sigma, q):
# quantization maps a continuous draw to a multiple of the step q,
# so the lpdf is computed as the log of the integral of P over (x - q, x].
# XXX: subtracting two numbers potentially very close together.
return np.log(lognormal_cdf(x, mu, sigma) - lognormal_cdf(x - q, mu, sigma))
@implicit_stochastic
@scope.define
def LGMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):
weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))
n_samples = np.prod(size)
# n_components = len(weights)
if low is None and high is None:
active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
assert len(active) == n_samples
samples = np.exp(rng.normal(loc=mus[active], scale=sigmas[active]))
else:
# -- draw from truncated components
# TODO: one-sided-truncation
low = float(low)
high = float(high)
if low >= high:
raise ValueError("low >= high", (low, high))
samples = []
while len(samples) < n_samples:
active = np.argmax(rng.multinomial(1, weights))
draw = rng.normal(loc=mus[active], scale=sigmas[active])
if low <= draw < high:
samples.append(np.exp(draw))
samples = np.asarray(samples)
samples = np.reshape(np.asarray(samples), size)
if q is not None:
samples = np.round(old_div(samples, q)) * q
return samples
def logsum_rows(x):
m = x.max(axis=1)
return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m
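# -- A tiny self-check (illustrative only): logsum_rows is a numerically
# stable log-sum-exp over rows, i.e. np.log(np.exp(x).sum(axis=1)).
def _example_logsum_rows_check():
    x = np.array([[0.0, 0.0], [1000.0, 1000.0]])
    # the naive formula overflows on the second row; logsum_rows returns
    # approximately [log(2), 1000 + log(2)] without overflow.
    return logsum_rows(x)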
@scope.define
def LGMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
samples, weights, mus, sigmas = list(
map(np.asarray, (samples, weights, mus, sigmas))
)
assert weights.ndim == 1
assert mus.ndim == 1
assert sigmas.ndim == 1
_samples = samples
if samples.ndim != 1:
samples = samples.flatten()
if low is None and high is None:
p_accept = 1
else:
p_accept = np.sum(
weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))
)
if q is None:
# compute the lpdf of each sample under each component
lpdfs = lognormal_lpdf(samples[:, None], mus, sigmas)
rval = logsum_rows(lpdfs + np.log(weights))
else:
# compute the lpdf of each sample under each component
prob = np.zeros(samples.shape, dtype="float64")
for w, mu, sigma in zip(weights, mus, sigmas):
if high is None:
ubound = samples + old_div(q, 2.0)
else:
ubound = np.minimum(samples + old_div(q, 2.0), np.exp(high))
if low is None:
lbound = samples - old_div(q, 2.0)
else:
lbound = np.maximum(samples - old_div(q, 2.0), np.exp(low))
lbound = np.maximum(0, lbound)
# -- two-stage addition is slightly more numerically accurate
inc_amt = w * lognormal_cdf(ubound, mu, sigma)
inc_amt -= w * lognormal_cdf(lbound, mu, sigma)
prob += inc_amt
rval = np.log(prob) - np.log(p_accept)
rval.shape = _samples.shape
return rval
#
# This is the weird heuristic ParzenWindow estimator used for continuous
# distributions in various ways.
#
@scope.define_info(o_len=3)
def adaptive_parzen_normal_orig(mus, prior_weight, prior_mu, prior_sigma):
"""
A heuristic estimator for the mu and sigma values of a GMM
TODO: try to find this heuristic in the literature, and cite it - Yoshua
mentioned the term 'elastic' I think?
mus - vector (N,) of observed values that become component centers
"""
mus_orig = np.array(mus)
mus = np.array(mus)
assert str(mus.dtype) != "object"
if mus.ndim != 1:
raise TypeError("mus must be vector", mus)
if len(mus) == 0:
mus = np.asarray([prior_mu])
sigma = np.asarray([prior_sigma])
elif len(mus) == 1:
mus = np.asarray([prior_mu] + [mus[0]])
sigma = np.asarray([prior_sigma, prior_sigma * 0.5])
elif len(mus) >= 2:
order = np.argsort(mus)
mus = mus[order]
sigma = np.zeros_like(mus)
sigma[1:-1] = np.maximum(mus[1:-1] - mus[0:-2], mus[2:] - mus[1:-1])
if len(mus) > 2:
lsigma = mus[2] - mus[0]
usigma = mus[-1] - mus[-3]
else:
lsigma = mus[1] - mus[0]
usigma = mus[-1] - mus[-2]
sigma[0] = lsigma
sigma[-1] = usigma
# XXX: is sorting them necessary anymore?
# un-sort the mus and sigma
mus[order] = mus.copy()
sigma[order] = sigma.copy()
if not np.all(mus_orig == mus):
print("orig", mus_orig)
print("mus", mus)
assert np.all(mus_orig == mus)
# put the prior back in
mus = np.asarray([prior_mu] + list(mus))
sigma = np.asarray([prior_sigma] + list(sigma))
maxsigma = prior_sigma
# -- magic formula:
minsigma = old_div(prior_sigma, np.sqrt(1 + len(mus)))
sigma = np.clip(sigma, minsigma, maxsigma)
weights = np.ones(len(mus), dtype=mus.dtype)
weights[0] = prior_weight
weights = old_div(weights, weights.sum())
return weights, mus, sigma
@scope.define
def linear_forgetting_weights(N, LF):
assert N >= 0
assert LF > 0
if N == 0:
return np.asarray([])
if N < LF:
return np.ones(N)
ramp = np.linspace(old_div(1.0, N), 1.0, num=N - LF)
flat = np.ones(LF)
weights = np.concatenate([ramp, flat], axis=0)
assert weights.shape == (N,), (weights.shape, N)
return weights
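# -- Worked example (illustrative only): linear_forgetting_weights(5, 3)
# keeps full weight on the LF=3 most recent observations and linearly
# down-weights the older ones: ramp = linspace(1/5, 1, num=2) = [0.2, 1.0],
# so the result is [0.2, 1.0, 1.0, 1.0, 1.0].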
# XXX: make TPE do a post-inference pass over the pyll graph and insert
# non-default LF argument
@scope.define_info(o_len=3)
def adaptive_parzen_normal(mus, prior_weight, prior_mu, prior_sigma, LF=DEFAULT_LF):
"""
mus - vector (N,) of observed values that become component centers
"""
mus = np.array(mus)
assert str(mus.dtype) != "object"
if mus.ndim != 1:
raise TypeError("mus must be vector", mus)
if len(mus) == 0:
srtd_mus = np.asarray([prior_mu])
sigma = np.asarray([prior_sigma])
prior_pos = 0
elif len(mus) == 1:
if prior_mu < mus[0]:
prior_pos = 0
srtd_mus = np.asarray([prior_mu, mus[0]])
sigma = np.asarray([prior_sigma, prior_sigma * 0.5])
else:
prior_pos = 1
srtd_mus = np.asarray([mus[0], prior_mu])
sigma = np.asarray([prior_sigma * 0.5, prior_sigma])
elif len(mus) >= 2:
# create new_mus, which is sorted, and in which
# the prior has been inserted
order = np.argsort(mus)
prior_pos = np.searchsorted(mus[order], prior_mu)
srtd_mus = np.zeros(len(mus) + 1)
srtd_mus[:prior_pos] = mus[order[:prior_pos]]
srtd_mus[prior_pos] = prior_mu
srtd_mus[prior_pos + 1 :] = mus[order[prior_pos:]]
sigma = np.zeros_like(srtd_mus)
sigma[1:-1] = np.maximum(
srtd_mus[1:-1] - srtd_mus[0:-2], srtd_mus[2:] - srtd_mus[1:-1]
)
lsigma = srtd_mus[1] - srtd_mus[0]
usigma = srtd_mus[-1] - srtd_mus[-2]
sigma[0] = lsigma
sigma[-1] = usigma
if LF and LF < len(mus):
unsrtd_weights = linear_forgetting_weights(len(mus), LF)
srtd_weights = np.zeros_like(srtd_mus)
assert len(unsrtd_weights) + 1 == len(srtd_mus)
srtd_weights[:prior_pos] = unsrtd_weights[order[:prior_pos]]
srtd_weights[prior_pos] = prior_weight
srtd_weights[prior_pos + 1 :] = unsrtd_weights[order[prior_pos:]]
else:
srtd_weights = np.ones(len(srtd_mus))
srtd_weights[prior_pos] = prior_weight
# -- magic formula:
maxsigma = old_div(prior_sigma, 1.0)
minsigma = old_div(prior_sigma, min(100.0, (1.0 + len(srtd_mus))))
sigma = np.clip(sigma, minsigma, maxsigma)
sigma[prior_pos] = prior_sigma
assert prior_sigma > 0
assert maxsigma > 0
assert minsigma > 0
assert np.all(sigma > 0), (sigma.min(), minsigma, maxsigma)
srtd_weights /= srtd_weights.sum()
return srtd_weights, srtd_mus, sigma
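# -- Illustrative note: for N observed values the estimator above returns
# N + 1 weights/mus/sigmas (the prior component plus one Gaussian per
# observation), with each sigma taken from the spacing of neighbouring
# centers and then clipped to lie between a fraction of prior_sigma and
# prior_sigma itself.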
#
# Adaptive Parzen Samplers
# These produce conditional estimators for various prior distributions
#
# NOTE: These are actually used in a fairly complicated way.
# They are actually returning pyll.Apply AST (Abstract Syntax Tree) objects.
# This AST is then manipulated and the corresponding _lpdf function is called
# (e.g GMM1_lpdf)
#
# Please see the build_posterior function for details
# -- Uniform
@adaptive_parzen_sampler("uniform")
def ap_uniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
obs, prior_weight, prior_mu, prior_sigma
)
return scope.GMM1(
weights, mus, sigmas, low=low, high=high, q=None, size=size, rng=rng
)
@adaptive_parzen_sampler("quniform")
def ap_quniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
obs, prior_weight, prior_mu, prior_sigma
)
return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=q, size=size, rng=rng)
@adaptive_parzen_sampler("loguniform")
def ap_loguniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(obs), prior_weight, prior_mu, prior_sigma
)
rval = scope.LGMM1(weights, mus, sigmas, low=low, high=high, size=size, rng=rng)
return rval
@adaptive_parzen_sampler("qloguniform")
def ap_qloguniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(
# -- map observations that were quantized to be below exp(low)
# (particularly 0) back up to exp(low) where they will
# interact in a reasonable way with the AdaptiveParzen
# thing.
scope.maximum(
obs,
scope.maximum( # -- protect against exp(low) underflow
EPS, scope.exp(low)
),
)
),
prior_weight,
prior_mu,
prior_sigma,
)
return scope.LGMM1(weights, mus, sigmas, low, high, q=q, size=size, rng=rng)
# -- Normal
@adaptive_parzen_sampler("normal")
def ap_normal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)
return scope.GMM1(weights, mus, sigmas, size=size, rng=rng)
@adaptive_parzen_sampler("qnormal")
def ap_qnormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)
return scope.GMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
@adaptive_parzen_sampler("lognormal")
def ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(obs), prior_weight, mu, sigma
)
rval = scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)
return rval
@adaptive_parzen_sampler("qlognormal")
def ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
log_obs = scope.log(scope.maximum(obs, EPS))
weights, mus, sigmas = scope.adaptive_parzen_normal(
log_obs, prior_weight, mu, sigma
)
rval = scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
return rval
# -- Categorical
@adaptive_parzen_sampler("randint")
def ap_randint_sampler(
obs, prior_weight, low, high=None, size=(), rng=None, LF=DEFAULT_LF
):
# randint can be seen as a categorical with high - low categories
weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
# if high is None, then low represents high and there is no offset
domain_size = low if high is None else high - low
offset = pyll.Literal(0) if high is None else low
counts = scope.bincount(obs, offset=offset, minlength=domain_size, weights=weights)
# -- add in some prior pseudocounts
pseudocounts = counts + prior_weight
random_variable = scope.randint_via_categorical(
old_div(pseudocounts, scope.sum(pseudocounts)), size=size, rng=rng
)
return random_variable
@scope.define
def tpe_cat_pseudocounts(counts, prior_weight, p, size):
if np.prod(size) == 0:
return []
if p.ndim == 2:
assert np.all(p == p[0])
p = p[0]
pseudocounts = counts + p.size * (prior_weight * p)
return old_div(pseudocounts, np.sum(pseudocounts))
@adaptive_parzen_sampler("categorical")
def ap_categorical_sampler(obs, prior_weight, p, size=(), rng=None, LF=DEFAULT_LF):
weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
# in order to support pchoice here, we need to find the size of p,
# but p can have p.ndim == 2, so we pass p to bincount and unpack it
# (if required) there
counts = scope.bincount(obs, p=p, weights=weights)
pseudocounts = scope.tpe_cat_pseudocounts(counts, prior_weight, p, size)
return scope.categorical(pseudocounts, size=size, rng=rng)
#
# Posterior clone performs symbolic inference on the pyll graph of priors.
#
@scope.define_info(o_len=2)
def ap_split_trials(o_idxs, o_vals, l_idxs, l_vals, gamma, gamma_cap=DEFAULT_LF):
"""Split the elements of `o_vals` (observations values) into two groups: those for
trials whose losses (`l_vals`) were above gamma, and those below gamma. Note that
only unique elements are returned, so the total number of returned elements might
be lower than `len(o_vals)`
"""
o_idxs, o_vals, l_idxs, l_vals = list(
map(np.asarray, [o_idxs, o_vals, l_idxs, l_vals])
)
# XXX if this is working, refactor this sort for efficiency
# Splitting is done this way to cope with duplicate loss values.
n_below = min(int(np.ceil(gamma * np.sqrt(len(l_vals)))), gamma_cap)
l_order = np.argsort(l_vals)
keep_idxs = set(l_idxs[l_order[:n_below]])
below = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
keep_idxs = set(l_idxs[l_order[n_below:]])
above = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
return np.asarray(below), np.asarray(above)
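# -- Worked example (illustrative only): with gamma=0.25 and 16 recorded
# losses, n_below = min(ceil(0.25 * sqrt(16)), gamma_cap) = 1, so only the
# single best trial contributes observations to the "below" group and the
# other 15 trials form the "above" group.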
@scope.define
def broadcast_best(samples, below_llik, above_llik):
if len(samples):
score = below_llik - above_llik
if len(samples) != len(score):
raise ValueError()
best = np.argmax(score)
return [samples[best]] * len(samples)
else:
return []
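# -- Worked example (illustrative only): broadcast_best picks the candidate
# maximizing log l(x) - log g(x), the quantity the TPE expected-improvement
# criterion reduces to. With below_llik = [-1.0, -0.5] and
# above_llik = [-0.5, -2.0] the scores are [-0.5, 1.5], so the second sample
# is the one repeated len(samples) times.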
def build_posterior(
specs,
prior_idxs,
prior_vals,
obs_idxs,
obs_vals,
obs_loss_idxs,
obs_loss_vals,
oloss_gamma,
prior_weight,
):
"""
This method clones a posterior inference graph by iterating forward in
topological order, and replacing prior random-variables (prior_idxs, prior_vals)
with new posterior distributions (post_specs, post_idxs, post_vals) that make use
of observations (obs_idxs, obs_vals).
"""
assert all(
isinstance(arg, pyll.Apply)
for arg in [obs_loss_idxs, obs_loss_vals, oloss_gamma]
)
assert set(prior_idxs.keys()) == set(prior_vals.keys())
expr = pyll.as_apply([specs, prior_idxs, prior_vals])
nodes = pyll.dfs(expr)
# build the joint posterior distribution as the values in this memo
memo = {}
# map prior RVs to observations
obs_memo = {}
for nid in prior_vals:
# construct the leading args for each call to adaptive_parzen_sampler
# which will permit the "adaptive parzen samplers" to adapt to the
# correct samples.
obs_below, obs_above = scope.ap_split_trials(
obs_idxs[nid], obs_vals[nid], obs_loss_idxs, obs_loss_vals, oloss_gamma
)
obs_memo[prior_vals[nid]] = [obs_below, obs_above]
for node in nodes:
if node not in memo:
new_inputs = [memo[arg] for arg in node.inputs()]
if node in obs_memo:
# -- this case corresponds to an observed Random Var
# node.name is a distribution like "normal", "randint", etc.
obs_below, obs_above = obs_memo[node]
aa = [memo[a] for a in node.pos_args]
fn = adaptive_parzen_samplers[node.name]
b_args = [obs_below, prior_weight] + aa
named_args = {kw: memo[arg] for (kw, arg) in node.named_args}
b_post = fn(*b_args, **named_args)
a_args = [obs_above, prior_weight] + aa
a_post = fn(*a_args, **named_args)
# fn is a function e.g ap_uniform_sampler, ap_normal_sampler, etc
# b_post and a_post are pyll.Apply objects that are
# AST (Abstract Syntax Trees). They create the distribution,
# (e.g. using adaptive_parzen_normal), and then
# call a function to sample randomly from that distribution
# (e.g. using scope.GMM1) which return those samples.
#
# However we are only interested in using the samples from b_post.
# This code looks at the AST and grabs the function name that we used
# for sampling (e.g. scope.GMM1) and modifies it, e.g. to
# "scope.GMM1_lpdf". It then calls this function, passing in the
# samples as the first parameter.
#
# The result is that we are effectively calling, for example:
# below_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_below, ...))
# above_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_above, ...))
assert a_post.name == b_post.name
fn_lpdf = getattr(scope, a_post.name + "_lpdf")
a_kwargs = {
n: a for n, a in a_post.named_args if n not in ("rng", "size")
}
b_kwargs = {
n: a for n, a in b_post.named_args if n not in ("rng", "size")
}
# calculate the log likelihood of b_post under both distributions
below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
# compute new_node based on below & above log likelihood
new_node = scope.broadcast_best(b_post, below_llik, above_llik)
elif hasattr(node, "obj"):
# -- keep same literals in the graph
new_node = node
else:
# -- this case is for all the other stuff in the graph
new_node = node.clone_from_inputs(new_inputs)
memo[node] = new_node
post_idxs = {nid: memo[idxs] for nid, idxs in prior_idxs.items()}
post_vals = {nid: memo[vals] for nid, vals in prior_vals.items()}
return post_idxs, post_vals
# TODO: is this used?
# @scope.define
# def idxs_prod(full_idxs, idxs_by_label, llik_by_label):
# """Add all of the log-likelihoods together by id.
#
# Example arguments:
# full_idxs = [0, 1, ... N-1]
# idxs_by_label = {'node_a': [1, 3], 'node_b': [3]}
# llik_by_label = {'node_a': [0.1, -3.3], node_b: [1.0]}
#
# This would return N elements: [0, 0.1, 0, -2.3, 0, 0, ... ]
# """
# assert len(set(full_idxs)) == len(full_idxs)
# full_idxs = list(full_idxs)
# rval = np.zeros(len(full_idxs))
# pos_of_tid = dict(list(zip(full_idxs, list(range(len(full_idxs))))))
# assert set(idxs_by_label.keys()) == set(llik_by_label.keys())
# for nid in idxs_by_label:
# idxs = idxs_by_label[nid]
# llik = llik_by_label[nid]
# assert np.all(np.asarray(idxs) > 1)
# assert len(set(idxs)) == len(idxs)
# assert len(idxs) == len(llik)
# for ii, ll in zip(idxs, llik):
# rval[pos_of_tid[ii]] += ll
# return rval
_default_prior_weight = 1.0
# -- suggest best of this many draws on every iteration
_default_n_EI_candidates = 24
# -- ceil(gamma * sqrt(n_trials)) is the number of best trials to use as "good"
_default_gamma = 0.25
_default_n_startup_jobs = 20
_default_linear_forgetting = DEFAULT_LF
def build_posterior_wrapper(domain, prior_weight, gamma):
"""
Calls build_posterior
Args:
domain (hyperopt.base.Domain): contains info about the obj function and the hp
space passed to fmin
prior_weight (float): smoothing factor for counts, to avoid having 0 prob
# TODO: consider renaming or improving documentation for suggest
gamma (float): the threshold to split between l(x) and g(x), see eq. 2 in
https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
Returns:
(observed, observed_loss, posterior): dicts of placeholder pyll Literals
for the observed idxs/vals and loss idxs/vals, plus the posterior
(idxs, vals) pyll graph produced by build_posterior.
"""
# -- these dummy values will be replaced in build_posterior() and never used
observed = {"idxs": pyll.Literal(), "vals": pyll.Literal()}
observed_loss = {"idxs": pyll.Literal(), "vals": pyll.Literal()}
posterior = build_posterior(
# -- vectorized clone of bandit template
domain.vh.v_expr,
# -- this dict and next represent prior dists
domain.vh.idxs_by_label(),
domain.vh.vals_by_label(),
observed["idxs"],
observed["vals"],
observed_loss["idxs"],
observed_loss["vals"],
pyll.Literal(gamma),
pyll.Literal(float(prior_weight)),
)
return observed, observed_loss, posterior
def suggest(
new_ids,
domain,
trials,
seed,
prior_weight=_default_prior_weight,
n_startup_jobs=_default_n_startup_jobs,
n_EI_candidates=_default_n_EI_candidates,
gamma=_default_gamma,
verbose=True,
):
"""
Given previous trials and the domain, suggest the best expected hp point
according to the TPE-EI algo
Args:
new_ids: list of new trial ids to suggest points for
domain (hyperopt.base.Domain): info about the objective and the hp space
trials (hyperopt.base.Trials): previously evaluated trials
seed: seed for the random number generator
prior_weight (float): smoothing factor for counts, to avoid zero probabilities
n_startup_jobs (int): number of initial trials drawn at random before TPE is used
n_EI_candidates (int): number of candidate points sampled per suggestion
gamma (float): fraction used to split trials into "good" and "bad" groups
verbose (bool): whether to log timing and progress information
Returns:
the new trial document(s) produced by trials.new_trial_docs
"""
t0 = time.time()
# use build_posterior_wrapper to create the pyll nodes
observed, observed_loss, posterior = build_posterior_wrapper(
domain, prior_weight, gamma
)
tt = time.time() - t0
if verbose:
logger.info("build_posterior_wrapper took %f seconds" % tt)
# Loop over previous trials to collect best_docs and best_docs_loss
best_docs = dict()
best_docs_loss = dict()
for doc in trials.trials:
# get either this doc's own tid or the tid of the doc it was derived from
tid = doc["misc"].get("from_tid", doc["tid"])
# associate infinite loss to new/running/failed jobs
loss = doc["result"].get("loss")
loss = float("inf") if loss is None else float(loss)
# keep the lowest loss seen for this tid
# (initialize it on first sight)
best_docs_loss.setdefault(tid, loss)
if loss <= best_docs_loss[tid]:
best_docs_loss[tid] = loss
best_docs[tid] = doc
# -- sort docs by order of suggestion
# so that linear_forgetting removes the oldest ones
tid_docs = sorted(best_docs.items())
losses = [best_docs_loss[tid] for tid, doc in tid_docs]
tids, docs = list(zip(*tid_docs)) if tid_docs else ([], [])
if verbose:
if docs:
s = "%i/%i trials with best loss %f" % (
len(docs),
len(trials),
np.nanmin(losses),
)
else:
s = "0 trials"
logger.info("TPE using %s" % s)
if len(docs) < n_startup_jobs:
# N.B. THIS SEEDS THE RNG BASED ON THE new_id
return rand.suggest(new_ids, domain, trials, seed)
# Sample and compute log-probability.
first_new_id = new_ids[0]
if tids:
# -- the +2 coordinates with an assertion above
# to ensure that fake ids are used during sampling
# TODO: not sure what assertion this refers to...
fake_id_0 = max(max(tids), first_new_id) + 2
else:
# -- weird - we're running the TPE algo from scratch
assert n_startup_jobs <= 0
fake_id_0 = first_new_id + 2
fake_ids = list(range(fake_id_0, fake_id_0 + n_EI_candidates))
# -- this dictionary will map pyll nodes to the values
# they should take during the evaluation of the pyll program
memo = {domain.s_new_ids: fake_ids, domain.s_rng: np.random.default_rng(seed)}
memo[observed_loss["idxs"]] = tids
memo[observed_loss["vals"]] = losses
observed_idxs_dict, observed_vals_dict = miscs_to_idxs_vals(
[doc["misc"] for doc in docs], keys=list(domain.params.keys())
)
memo[observed["idxs"]] = observed_idxs_dict
memo[observed["vals"]] = observed_vals_dict
# evaluate `n_EI_candidates` pyll nodes in `posterior` using `memo`
# TODO: it seems to return idxs, vals, all the same. Is this correct?
idxs, vals = pyll.rec_eval(posterior, memo=memo, print_node_on_error=False)
# hack to add offset again for randint params
for label, param in domain.params.items():
if param.name == "randint" and len(param.pos_args) == 2:
offset = param.pos_args[0].obj
vals[label] = [val + offset for val in vals[label]]
# -- retrieve the best of the samples and form the return tuple
# specs are deprecated since build_posterior makes all the same
rval_specs = [None]
rval_results = [domain.new_result()]
rval_miscs = [{"tid": first_new_id, "cmd": domain.cmd, "workdir": domain.workdir}]
miscs_update_idxs_vals(
rval_miscs,
idxs,
vals,
idxs_map={fake_ids[0]: first_new_id},
assert_all_vals_used=False,
)
# return the doc for the best new trial
return trials.new_trial_docs([first_new_id], rval_specs, rval_results, rval_miscs)
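# -- A minimal usage sketch (illustrative only; `objective` and the search
# space are hypothetical). `suggest` is normally passed to hyperopt.fmin as
# the `algo` argument, optionally wrapped in functools.partial to override
# the TPE knobs above.
def _example_fmin_with_tpe():
    from functools import partial
    from hyperopt import fmin, hp

    def objective(x):
        return (x - 1.0) ** 2

    space = hp.uniform("x", -5.0, 5.0)
    algo = partial(suggest, n_startup_jobs=10, n_EI_candidates=24, gamma=0.25)
    # best is a dict of parameter values, e.g. {"x": <float>}
    best = fmin(objective, space, algo=algo, max_evals=50)
    return best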
| 32,594 | 33.059561 | 93 |
py
|
hyperopt
|
hyperopt-master/hyperopt/mongoexp.py
|
"""
Mongodb-based Trials Object
===========================
Components involved:
- mongo
e.g. mongod ...
- driver
e.g. hyperopt-mongo-search mongo://address bandit_json bandit_algo_json
- worker
e.g. hyperopt-mongo-worker --loop mongo://address
Mongo
=====
Mongo (daemon process mongod) is used for IPC between the driver and worker.
Configure it as you like, so that hyperopt-mongo-search can communicate with it.
I think there is some support in this file for an ssh+mongo connection type.
The experiment uses the following collections for IPC:
* jobs - documents of a standard form used to store suggested trials and their
results. These documents have keys:
* spec : subdocument returned by bandit_algo.suggest
* exp_key: an identifier of which driver suggested this trial
* cmd: a tuple (protocol, ...) identifying bandit.evaluate
* state: 0, 1, 2, 3 for job state (new, running, ok, fail)
* owner: None for new jobs, (hostname, pid) for started jobs
* book_time: time a job was reserved
* refresh_time: last time the process running the job checked in
* result: the subdocument returned by bandit.evaluate
* error: for jobs of state 3, a reason for failure.
* logs: a dict of sequences of strings received by ctrl object
* info: info messages
* warn: warning messages
* error: error messages
* fs - a gridfs storage collection (used for pickling)
* drivers - documents describing drivers. These are used to prevent two drivers
from using the same exp_key simultaneously, and to attach saved states.
* exp_key
* workdir: [optional] path where workers should chdir to
Attachments:
* pkl: [optional] saved state of experiment class
* bandit_args_kwargs: [optional] pickled (clsname, args, kwargs) to
reconstruct bandit in worker processes
The MongoJobs, and CtrlObj classes as well as the main_worker
method form the abstraction barrier around this database layout.
Worker
======
A worker looks up a job in a mongo database, maps that job document to a
runnable python object, calls that object, and writes the return value back to
the database.
A worker *reserves* a job by atomically identifying a document in the jobs
collection whose owner is None and whose state is 0, and setting the state to
1. If it fails to identify such a job, it loops with a random sleep interval
of a few seconds and polls the database.
If hyperopt-mongo-worker is called with a --loop argument then it goes back to
the database after finishing a job to identify and perform another one.
CtrlObj
-------
The worker allocates a CtrlObj and passes it to bandit.evaluate in addition to
the subdocument found at job['spec']. A bandit can use ctrl.info, ctrl.warn,
ctrl.error and so on like logger methods, and those messages will be written
to the mongo database (to job['logs']). They are not written synchronously
though, they are written when the bandit.evaluate function calls
ctrl.checkpoint().
Ctrl.checkpoint does several things:
* flushes logging messages to the database
* updates the refresh_time
* optionally updates the result subdocument
The main_worker routine calls Ctrl.checkpoint(rval) once after the
bandit.evaluate function has returned before setting the state to 2 or 3 to
finalize the job in the database.
"""
from future import standard_library
import copy
# import hashlib
import logging
import optparse
import os
# import shutil
import signal
import socket
import subprocess
import sys
import time
import urllib.parse
import warnings
import numpy
try:
import pymongo
import gridfs
from bson import SON
_has_mongo = True
except ImportError:
_has_mongo = False
from .base import JOB_STATES
from .base import JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE, JOB_STATE_ERROR
from .base import Trials
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
from .utils import working_dir, temp_dir
__authors__ = ["James Bergstra", "Dan Yamins"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
standard_library.install_aliases()
logger = logging.getLogger(__name__)
try:
import cloudpickle as pickler
except Exception as e:
logger.info(
'Failed to load cloudpickle, try installing cloudpickle via "pip '
'install cloudpickle" for enhanced pickling support.'
)
import pickle as pickler
class OperationFailure(Exception):
"""Proxy that could be factored out if we also want to use CouchDB and
JobmanDB classes with this interface
"""
class Shutdown(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class WaitQuit(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class InvalidMongoTrial(InvalidTrial):
pass
class DomainSwapError(Exception):
"""Raised when the search program tries to change the bandit attached to
an experiment.
"""
class ReserveTimeout(Exception):
"""No job was reserved in the allotted time"""
def read_pw():
return open(os.path.join(os.getenv("HOME"), ".hyperopt")).read()[:-1]
def parse_url(url, pwfile=None):
"""Unpacks a url of the form
protocol://[username[:pw]]@hostname[:port]/db/collection
:rtype: tuple of strings
:returns: protocol, username, password, hostname, port, dbname, collection, authdbname
:note:
If the password is not given in the url but the username is, then
this function will read the password from file by calling
``open(pwfile).read()[:-1]``
"""
protocol = url[: url.find(":")]
ftp_url = "ftp" + url[url.find(":") :]
# -- parse the string as if it were an ftp address
tmp = urllib.parse.urlparse(ftp_url)
query_params = urllib.parse.parse_qs(tmp.query)
logger.info("PROTOCOL %s" % protocol)
logger.info("USERNAME %s" % tmp.username)
logger.info("HOSTNAME %s" % tmp.hostname)
logger.info("PORT %s" % tmp.port)
logger.info("PATH %s" % tmp.path)
authdbname = None
if "authSource" in query_params and len(query_params["authSource"]):
authdbname = query_params["authSource"][-1]
logger.info("AUTH DB %s" % authdbname)
try:
_, dbname, collection = tmp.path.split("/")
except:
print("Failed to parse '%s'" % (str(tmp.path)), file=sys.stderr)
raise
logger.info("DB %s" % dbname)
logger.info("COLLECTION %s" % collection)
if tmp.password is None:
if (tmp.username is not None) and pwfile:
password = open(pwfile).read()[:-1]
else:
password = None
else:
password = tmp.password
if password is not None:
logger.info("PASS ***")
port = int(float(tmp.port)) # port has to be casted explicitly here.
return (
protocol,
tmp.username,
password,
tmp.hostname,
port,
dbname,
collection,
authdbname,
)
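# -- Worked example (illustrative only; host and db names are made up):
# parse_url("mongo://hyperopt@localhost:27017/foo_db/jobs") returns
# ("mongo", "hyperopt", None, "localhost", 27017, "foo_db", "jobs", None);
# the password stays None because no pwfile was given.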
def connection_with_tunnel(
dbname,
host="localhost",
auth_dbname=None,
port=27017,
ssh=False,
user="hyperopt",
pw=None,
):
if ssh:
local_port = numpy.random.randint(low=27500, high=28000)
# -- forward from local to remote machine
ssh_tunnel = subprocess.Popen(
["ssh", "-NTf", "-L", "%i:%s:%i" % (local_port, "127.0.0.1", port), host]
)
# -- give the subprocess time to set up
time.sleep(0.5)
connection = pymongo.MongoClient(
"127.0.0.1", local_port, document_class=SON, w=1, journal=True
)
else:
connection = pymongo.MongoClient(
host, port, document_class=SON, w=1, journal=True
)
if user:
if not pw:
pw = read_pw()
if user == "hyperopt" and not auth_dbname:
auth_dbname = "admin"
connection[dbname].authenticate(user, pw, source=auth_dbname)
ssh_tunnel = None
# Note that the w=1 and journal=True args to MongoClient above should:
# -- Ensure that changes are written to at least one server.
# -- Ensure that changes are written to the journal if there is one.
return connection, ssh_tunnel
def connection_from_string(s):
protocol, user, pw, host, port, db, collection, authdb = parse_url(s)
if protocol == "mongo":
ssh = False
elif protocol in ("mongo+ssh", "ssh+mongo"):
ssh = True
else:
raise ValueError("unrecognized protocol for MongoJobs", protocol)
connection, tunnel = connection_with_tunnel(
dbname=db, ssh=ssh, user=user, pw=pw, host=host, port=port, auth_dbname=authdb
)
return connection, tunnel, connection[db], connection[db][collection]
class MongoJobs:
"""
# Interface to a Jobs database structured like this
#
# Collections:
#
# db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
# 'refresh_time', 'state', 'exp_key', 'result'}
# This is the collection that the worker nodes write to
#
# db.gfs - file storage via gridFS for all collections
#
"""
def __init__(self, db, jobs, gfs, conn, tunnel, config_name):
"""
Parameters
----------
db - Mongo Database (e.g. `Connection()[dbname]`)
database in which all job-related info is stored
jobs - Mongo Collection handle
collection within `db` to use for job arguments, return vals,
and various bookkeeping stuff and meta-data. Typically this is
`db['jobs']`
gfs - Mongo GridFS handle
GridFS is used to store attachments - binary blobs that don't fit
or are awkward to store in the `jobs` collection directly.
conn - Mongo Connection
Why we need to keep this, I'm not sure.
tunnel - something for ssh tunneling if you're doing that
See `connection_with_tunnel` for more info.
config_name - string
XXX: No idea what this is for, seems unimportant.
"""
if not _has_mongo:
raise Exception(
"MongoJobs cannot import pymongo classes. Make sure that pymongo "
"is available in your environment. E.g., try running 'import pymongo'"
)
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn = conn
self.tunnel = tunnel
self.config_name = config_name
collection = property(lambda s: s.jobs)
@classmethod
def alloc(
cls,
dbname,
host="localhost",
auth_dbname="admin",
port=27017,
jobs_coll="jobs",
gfs_coll="fs",
ssh=False,
user=None,
pw=None,
):
connection, tunnel = connection_with_tunnel(
dbname, host, auth_dbname, port, ssh, user, pw
)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
# -- "spec" matches the default config_name used by new_from_connection_str
return cls(db, db[jobs_coll], gfs, connection, tunnel, config_name="spec")
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll="fs", config_name="spec"):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ["exp_key", "result.loss", "book_time"]:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index("exp_key", unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
# TODO: mark some as MIA
rval = [r for r in rval if not r.get("MIA", False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
# TODO: mark some as MIA
rval = [r for r in rval if r.get("MIA", False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job):
"""Return a job dictionary by inserting the job dict into the database"""
try:
cpy = copy.deepcopy(job)
# -- this call adds an _id field to cpy
_id = self.jobs.insert(cpy, check_keys=True)
# -- so now we return the dict with the _id field
assert _id == cpy["_id"]
return cpy
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# This was meant to make it easier to catch insertion errors
# in a generic way even if different databases were used.
# ... but there's just MongoDB so far, so kinda goofy.
raise OperationFailure(e)
def delete(self, job):
"""Delete job[s]"""
try:
self.jobs.remove(job)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
def delete_all(self, cond=None):
"""Delete all jobs and attachments"""
if cond is None:
cond = {}
try:
for d in self.jobs.find(filter=cond, projection=["_id", "_attachments"]):
logger.info("deleting job %s" % d["_id"])
for name, file_id in d.get("_attachments", []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error(f"failed to remove attachment {name}:{file_id}")
self.jobs.remove(d)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
def delete_all_error_jobs(self):
return self.delete_all(cond={"state": JOB_STATE_ERROR})
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(
cond
) # copy is important, will be modified, but only the top-level
if exp_key is not None:
cond["exp_key"] = exp_key
# having an owner of None implies state==JOB_STATE_NEW, so this effectively
# acts as a filter to make sure that only new jobs get reserved.
if cond.get("owner") is not None:
raise ValueError("refusing to reserve owned job")
else:
cond["owner"] = None
cond["state"] = JOB_STATE_NEW  # theoretically this is redundant, theoretically
try:
rval = self.jobs.find_and_modify(
cond,
{
"$set": {
"owner": host_id,
"book_time": now,
"state": JOB_STATE_RUNNING,
"refresh_time": now,
}
},
new=True,
upsert=False,
)
except pymongo.errors.OperationFailure as e:
logger.error("Error during reserve_job: %s" % str(e))
rval = None
return rval
def refresh(self, doc):
self.update(doc, dict(refresh_time=coarse_utcnow()))
def update(self, doc, dct, collection=None, do_sanity_checks=True):
"""Return union of doc and dct, after making sure that dct has been
added to doc in `collection`.
This function does not modify either `doc` or `dct`.
"""
if collection is None:
collection = self.collection
dct = copy.deepcopy(dct)
if "_id" not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if "_id" in dct:
if dct["_id"] != doc["_id"]:
raise ValueError("cannot update the _id field")
del dct["_id"]
if "version" in dct:
if dct["version"] != doc["version"]:
warnings.warn('Ignoring "version" field in update dictionary')
if "version" in doc:
doc_query = dict(_id=doc["_id"], version=doc["version"])
dct["version"] = doc["version"] + 1
else:
doc_query = dict(_id=doc["_id"])
dct["version"] = 1
try:
# warning - if doc matches nothing then this function succeeds
# N.B. this matches *at most* one entry, and possibly zero
collection.update(doc_query, {"$set": dct}, upsert=False, multi=False)
except pymongo.errors.OperationFailure as e:
# -- translate pymongo error class into hyperopt error class
# see insert() code for rationale.
raise OperationFailure(e)
# update doc in-place to match what happened on the server side
doc.update(dct)
if do_sanity_checks:
server_doc = collection.find_one(
dict(_id=doc["_id"], version=doc["version"])
)
if server_doc is None:
raise OperationFailure("updated doc not found : %s" % str(doc))
return doc
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], str), name_id
return str(name_id[0])
return list(map(as_str, doc.get("_attachments", [])))
def set_attachment(self, doc, blob, name, collection=None):
"""Attach potentially large data string `blob` to `doc` by name `name`
blob must be a string
doc must have been saved in some collection (must have an _id), but not
necessarily the jobs collection.
name must be a string
Returns None
"""
# If there is already a file with the given name for this doc, then we will delete it
# after writing the new file
attachments = doc.get("_attachments", [])
name_matches = [a for a in attachments if a[0] == name]
# the filename is set to something so that fs.list() will display the file
new_file_id = self.gfs.put(blob, filename="{}_{}".format(doc["_id"], name))
logger.info(
"stored blob of %i bytes with id=%s and filename %s_%s"
% (len(blob), str(new_file_id), doc["_id"], name)
)
new_attachments = [a for a in attachments if a[0] != name] + [
(name, new_file_id)
]
try:
ii = 0
doc = self.update(
doc, {"_attachments": new_attachments}, collection=collection
)
# there is a database leak until we actually delete the files that
# are no longer pointed to by new_attachments
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning(
"Leak during set_attachment: old_file_id=%s" % (name_matches[ii][1])
)
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
# return new_file_id
def get_attachment(self, doc, name):
"""Retrieve data attached to `doc` by `attach_blob`.
Raises OperationFailure if `name` does not correspond to an attached blob.
Returns the blob as a string.
"""
attachments = doc.get("_attachments", [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure("Attachment not found: %s" % name)
if len(file_ids) > 1:
raise OperationFailure("multiple name matches", (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get("_attachments", [])
file_id = None
for i, a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure("Attachment not found: %s" % name)
del attachments[i]
self.update(doc, {"_attachments": attachments}, collection=collection)
self.gfs.delete(file_id)
class MongoTrials(Trials):
"""Trials maps on to an entire mongo collection. It's basically a wrapper
around MongoJobs for now.
As a concession to performance, this object permits trial filtering based
on the exp_key, but I feel that's a hack. The case of `cmd` is similar--
the exp_key and cmd are semantically coupled.
WRITING TO THE DATABASE
-----------------------
The trials object is meant for *reading* a trials database. Writing
to a database is different enough from writing to an in-memory
collection that no attempt has been made to abstract away that
difference. If you want to update the documents within
a MongoTrials collection, then retrieve the `.handle` attribute (a
MongoJobs instance) and use lower-level methods, or pymongo's
interface directly. When you are done writing, call refresh() or
refresh_tids() to bring the MongoTrials up to date.
"""
asynchronous = True
def __init__(self, arg, exp_key=None, cmd=None, workdir=None, refresh=True):
if not _has_mongo:
raise Exception(
"MongoTrials cannot import pymongo classes. Make sure that pymongo "
"is available in your environment. E.g., try running 'import pymongo'"
)
if isinstance(arg, MongoJobs):
self.handle = arg
else:
connection_string = arg
self.handle = MongoJobs.new_from_connection_str(connection_string)
self.handle.create_indexes()
self._exp_key = exp_key
self.cmd = cmd
self.workdir = workdir
if refresh:
self.refresh()
def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
rval = self.__class__(
self.handle,
exp_key=self._exp_key if exp_key is None else exp_key,
cmd=self.cmd if cmd is None else cmd,
workdir=self.workdir if workdir is None else workdir,
refresh=refresh,
)
return rval
def refresh_tids(self, tids):
"""Sync documents with `['tid']` in the list of `tids` from the
database (not *to* the database).
Local trial documents whose tid is not in `tids` are not
affected by this call. Local trial documents whose tid is in `tids` may
be:
* *deleted* (if db no longer has corresponding document), or
* *updated* (if db has an updated document) or,
* *left alone* (if db document matches local one).
Additionally, if the db has a matching document, but there is no
local trial with a matching tid, then the db document will be
*inserted* into the local collection.
"""
exp_key = self._exp_key
query = {"exp_key": exp_key} if exp_key != None else {}
t0 = time.time()
query["state"] = {"$ne": JOB_STATE_ERROR}
if tids is not None:
query["tid"] = {"$in": list(tids)}
orig_trials = getattr(self, "_trials", [])
_trials = orig_trials[:] # copy to make sure it doesn't get screwed up
if _trials:
db_data = list(self.handle.jobs.find(query, projection=["_id", "version"]))
# -- pull down a fresh list of ids from mongo
if db_data:
# make numpy data arrays
db_data = numpy.rec.array(
[(x["_id"], int(x["version"])) for x in db_data],
names=["_id", "version"],
)
db_data.sort(order=["_id", "version"])
db_data = db_data[get_most_recent_inds(db_data)]
existing_data = numpy.rec.array(
[(x["_id"], int(x["version"])) for x in _trials],
names=["_id", "version"],
)
existing_data.sort(order=["_id", "version"])
# which records are in db but not in existing, and vice versa
db_in_existing = fast_isin(db_data["_id"], existing_data["_id"])
existing_in_db = fast_isin(existing_data["_id"], db_data["_id"])
# filtering out out-of-date records
_trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
# new data is what's in db that's not in existing
new_data = db_data[numpy.invert(db_in_existing)]
# having removed the new and out-of-date records,
# concentrate on data present in both db and existing for state changes
db_data = db_data[db_in_existing]
existing_data = existing_data[existing_in_db]
try:
assert len(db_data) == len(existing_data)
assert (existing_data["_id"] == db_data["_id"]).all()
assert (existing_data["version"] <= db_data["version"]).all()
except:
report_path = os.path.join(
os.getcwd(),
"hyperopt_refresh_crash_report_"
+ str(numpy.random.randint(1e8))
+ ".pkl",
)
logger.error(
"HYPEROPT REFRESH ERROR: writing error file to %s" % report_path
)
_file = open(report_path, "w")
pickler.dump(
{"db_data": db_data, "existing_data": existing_data}, _file
)
_file.close()
raise
same_version = existing_data["version"] == db_data["version"]
_trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
version_changes = existing_data[numpy.invert(same_version)]
# actually get the updated records
update_ids = new_data["_id"].tolist() + version_changes["_id"].tolist()
num_new = len(update_ids)
update_query = copy.deepcopy(query)
update_query["_id"] = {"$in": update_ids}
updated_trials = list(self.handle.jobs.find(update_query))
_trials.extend(updated_trials)
else:
num_new = 0
_trials = []
else:
# this case is only for performance; it could be removed
# without breaking correctness.
_trials = list(self.handle.jobs.find(query))
if _trials:
_trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
num_new = len(_trials)
logger.debug(
"Refresh data download took %f seconds for %d ids"
% (time.time() - t0, num_new)
)
if tids is not None:
# -- If tids were given, then _trials only contains
# documents with matching tids. Here we augment these
# fresh matching documents, with our current ones whose
# tids don't match.
new_trials = _trials
tids_set = set(tids)
assert all(t["tid"] in tids_set for t in new_trials)
old_trials = [t for t in orig_trials if t["tid"] not in tids_set]
_trials = new_trials + old_trials
# -- reassign new trials to self, in order of increasing tid
jarray = numpy.array([j["_id"] for j in _trials])
jobsort = jarray.argsort()
self._trials = [_trials[_idx] for _idx in jobsort]
self._specs = [_trials[_idx]["spec"] for _idx in jobsort]
self._results = [_trials[_idx]["result"] for _idx in jobsort]
self._miscs = [_trials[_idx]["misc"] for _idx in jobsort]
def refresh(self):
self.refresh_tids(None)
def _insert_trial_docs(self, docs):
rval = []
for doc in docs:
rval.append(self.handle.jobs.insert(doc))
return rval
def count_by_state_unsynced(self, arg):
exp_key = self._exp_key
# TODO: consider searching by SON rather than dict
if isinstance(arg, int):
if arg not in JOB_STATES:
raise ValueError("invalid state", arg)
query = dict(state=arg)
else:
assert hasattr(arg, "__iter__")
states = list(arg)
assert all([x in JOB_STATES for x in states])
query = dict(state={"$in": states})
if exp_key != None:
query["exp_key"] = exp_key
rval = self.handle.jobs.find(query).count()
return rval
def delete_all(self, cond=None):
cond = {} if cond is None else dict(cond)
if self._exp_key:
cond["exp_key"] = self._exp_key
# -- remove all documents matching condition
self.handle.delete_all(cond)
gfs = self.handle.gfs
for filename in gfs.list():
try:
fdoc = gfs.get_last_version(filename=filename, **cond)
except gridfs.errors.NoFile:
continue
gfs.delete(fdoc._id)
self.refresh()
def new_trial_ids(self, last_id):
db = self.handle.db
# N.B. that the exp key is *not* used here. It was once, but it caused
# a nasty bug: tids were generated by a global experiment
# with exp_key=None, running a suggest() that introduced sub-experiments
# with exp_keys, which ran jobs that did result injection. The tids of
# injected jobs were sometimes unique within an experiment, and
# sometimes not. Hilarious!
#
# Solution: tids are generated to be unique across the db, not just
# within an exp_key.
#
# -- mongo docs say you can't upsert an empty document
query = {"a": 0}
doc = None
while doc is None:
doc = db.job_ids.find_and_modify(
query, {"$inc": {"last_id": last_id}}, upsert=True
)
if doc is None:
logger.warning("no last_id found, re-trying")
time.sleep(1.0)
lid = doc.get("last_id", 0)
return list(range(lid, lid + last_id))
def trial_attachments(self, trial):
"""
Attachments to a single trial (e.g. learned weights)
Returns a dictionary interface to the attachments.
"""
# don't offer more here than in MongoCtrl
class Attachments:
def __init__(self, handle: MongoJobs):
self.handle = handle
def __contains__(self, name):
return name in self.handle.attachment_names(doc=trial)
def __len__(self):
return len(self.handle.attachment_names(doc=trial))
def __iter__(self):
return iter(self.handle.attachment_names(doc=trial))
def __getitem__(self, name):
try:
return self.handle.get_attachment(doc=trial, name=name)
except OperationFailure:
raise KeyError(name)
def __setitem__(self, name, value):
self.handle.set_attachment(
doc=trial, blob=value, name=name, collection=self.handle.db.jobs
)
def __delitem__(self, name):
raise NotImplementedError("delete trial_attachment")
def keys(self):
return [k for k in self]
def values(self):
return [self[k] for k in self]
def items(self):
return [(k, self[k]) for k in self]
return Attachments(self.handle)
@property
def attachments(self):
"""
Attachments to a Trials set (such as bandit args).
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
gfs = self.handle.gfs
query = {}
if self._exp_key:
query["exp_key"] = self._exp_key
class Attachments:
def __iter__(_self):
if query:
# -- gfs.list does not accept query kwargs
# (at least, as of pymongo 2.4)
filenames = [fname for fname in gfs.list() if fname in _self]
else:
filenames = gfs.list()
return iter(filenames)
def __contains__(_self, name):
return gfs.exists(filename=name, **query)
def __getitem__(_self, name):
try:
rval = gfs.get_version(filename=name, **query).read()
return rval
except gridfs.NoFile:
raise KeyError(name)
def __setitem__(_self, name, value):
if gfs.exists(filename=name, **query):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
gfs.put(value, filename=name, encoding="utf-8", **query)
def __delitem__(_self, name):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
return Attachments()
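# -- A minimal driver-side sketch (illustrative only; the host, db name and
# exp_key are made up). The driver runs fmin against a MongoTrials that
# points at the `jobs` collection, while separate worker processes run
# `hyperopt-mongo-worker --mongo=localhost:27017/foo_db` against the same
# database and evaluate the suggested points.
def _example_mongo_trials_driver():
    from hyperopt import fmin, hp, tpe

    def objective(x):
        # workers must be able to unpickle this function (recent hyperopt
        # uses cloudpickle for this; otherwise use an importable function)
        return (x - 1.0) ** 2

    trials = MongoTrials("mongo://localhost:27017/foo_db/jobs", exp_key="exp1")
    return fmin(
        fn=objective,
        space=hp.uniform("x", -5.0, 5.0),
        algo=tpe.suggest,
        max_evals=20,
        trials=trials,
    )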
class MongoWorker:
poll_interval = 3.0 # -- seconds
workdir = None
def __init__(
self,
mj,
poll_interval=poll_interval,
workdir=workdir,
exp_key=None,
logfilename="logfile.txt",
):
"""
mj - MongoJobs interface to jobs collection
poll_interval - seconds
workdir - string
exp_key - restrict reservations to this key
"""
self.mj = mj
self.poll_interval = poll_interval
self.workdir = workdir
self.exp_key = exp_key
self.logfilename = logfilename
def make_log_handler(self):
self.log_handler = logging.FileHandler(self.logfilename)
self.log_handler.setFormatter(
logging.Formatter(fmt="%(levelname)s (%(name)s): %(message)s")
)
self.log_handler.setLevel(logging.INFO)
def run_one(self, host_id=None, reserve_timeout=None, erase_created_workdir=False):
if host_id == None:
host_id = ("%s:%i" % (socket.gethostname(), os.getpid()),)
job = None
start_time = time.time()
mj = self.mj
while job is None:
if reserve_timeout and (time.time() - start_time) > reserve_timeout:
raise ReserveTimeout()
job = mj.reserve(host_id, exp_key=self.exp_key)
if not job:
interval = 1 + numpy.random.rand() * (float(self.poll_interval) - 1.0)
logger.info("no job found, sleeping for %.1fs" % interval)
time.sleep(interval)
logger.debug("job found: %s" % str(job))
# -- don't let the cmd mess up our trial object
spec = spec_from_misc(job["misc"])
ctrl = MongoCtrl(
trials=MongoTrials(mj, exp_key=job["exp_key"], refresh=False),
read_only=False,
current_trial=job,
)
if self.workdir is None:
workdir = job["misc"].get("workdir", os.getcwd())
if workdir is None:
workdir = ""
workdir = os.path.join(workdir, str(job["_id"]))
else:
workdir = self.workdir
workdir = os.path.abspath(os.path.expanduser(workdir))
try:
root_logger = logging.getLogger()
if self.logfilename:
self.make_log_handler()
root_logger.addHandler(self.log_handler)
cmd = job["misc"]["cmd"]
cmd_protocol = cmd[0]
try:
if cmd_protocol == "cpickled fn":
worker_fn = pickler.loads(cmd[1])
elif cmd_protocol == "call evaluate":
bandit = pickler.loads(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == "token_load":
cmd_toks = cmd[1].split(".")
cmd_module = ".".join(cmd_toks[:-1])
worker_fn = exec_import(cmd_module, cmd[1])
elif cmd_protocol == "bandit_json evaluate":
bandit = json_call(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == "driver_attachment":
# name = 'driver_attachment_%s' % job['exp_key']
blob = ctrl.trials.attachments[cmd[1]]
bandit_name, bandit_args, bandit_kwargs = pickler.loads(blob)
worker_fn = json_call(
bandit_name, args=bandit_args, kwargs=bandit_kwargs
).evaluate
elif cmd_protocol == "domain_attachment":
blob = ctrl.trials.attachments[cmd[1]]
try:
domain = pickler.loads(blob)
except BaseException as e:
logger.info("Error while unpickling.")
raise
worker_fn = domain.evaluate
else:
raise ValueError("Unrecognized cmd protocol", cmd_protocol)
with temp_dir(workdir, erase_created_workdir), working_dir(workdir):
result = worker_fn(spec, ctrl)
result = SONify(result)
except BaseException as e:
# XXX: save exception to database, but if this fails, then
# at least raise the original traceback properly
logger.info("job exception: %s" % str(e))
ctrl.checkpoint()
mj.update(
job, {"state": JOB_STATE_ERROR, "error": (str(type(e)), str(e))}
)
raise
finally:
if self.logfilename:
root_logger.removeHandler(self.log_handler)
logger.info("job finished: %s" % str(job["_id"]))
attachments = result.pop("attachments", {})
for aname, aval in list(attachments.items()):
logger.info(
"mongoexp: saving attachment name=%s (%i bytes)" % (aname, len(aval))
)
ctrl.attachments[aname] = aval
ctrl.checkpoint(result)
mj.update(job, {"state": JOB_STATE_DONE})
class MongoCtrl(Ctrl):
"""
Attributes:
current_trial - current job document
jobs - MongoJobs object in which current_trial resides
read_only - True means don't change the db
"""
def __init__(self, trials, current_trial, read_only):
self.trials = trials
self.current_trial = current_trial
self.read_only = read_only
def debug(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
# XXX: This is supposed to log to db
        return logger.warning(*args, **kwargs)
def error(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.error(*args, **kwargs)
def checkpoint(self, result=None):
if not self.read_only:
handle = self.trials.handle
handle.refresh(self.current_trial)
if result is not None:
return handle.update(self.current_trial, dict(result=result))
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
@property
def set_attachment(self):
# XXX: Is there a better deprecation error?
raise RuntimeError(
"set_attachment deprecated. Use `self.attachments[name] = value`"
)
def exec_import(cmd_module, cmd):
    # -- exec() cannot rebind a local variable in Python 3, so run the import
    #    and assignment in an explicit namespace and read the result back out.
    namespace = {}
    exec(f"import {cmd_module}; worker_fn = {cmd}", namespace)
    return namespace["worker_fn"]
def as_mongo_str(s):
if s.startswith("mongo://"):
return s
return "mongo://%s" % s
def number_of_jobs_in_db(options):
mj = MongoJobs.new_from_connection_str(as_mongo_str(options.mongo) + "/jobs")
final_num = mj.jobs.find().count()
return final_num
def main_worker_helper(options, args):
N = int(options.max_jobs)
if options.last_job_timeout is not None:
end_time = time.time() + float(options.last_job_timeout)
else:
end_time = None
def sighandler_shutdown(signum, frame):
logger.info("Caught signal %i, shutting down." % signum)
raise Shutdown(signum)
def sighandler_wait_quit(signum, frame):
logger.info("Caught signal %i, shutting down." % signum)
raise WaitQuit(signum)
is_windows = os.name == "nt"
if not is_windows:
signal.signal(signal.SIGHUP, sighandler_shutdown)
signal.signal(signal.SIGUSR1, sighandler_wait_quit)
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
if N > 1:
proc = None
cons_errs = 0
while N and cons_errs < int(options.max_consecutive_failures):
# exit due to time limit:
if end_time and time.time() > end_time:
logger.info("Exiting due to last_job_timeout")
return
# exit due to threshold on number of jobs:
if (
options.max_jobs_in_db is not None
and options.max_jobs_in_db != sys.maxsize
):
num_jobs_db = number_of_jobs_in_db(options)
if int(num_jobs_db) >= int(options.max_jobs_in_db):
logger.info(
"Exiting because there are "
+ str(num_jobs_db)
+ " jobs in the database, but the limit is "
+ str(options.max_jobs_in_db)
)
return
# try to run one MongoWorker
try:
if options.use_subprocesses:
# recursive Popen, dropping N from the argv
# By using another process to run this job
# we protect ourselves from memory leaks, bad cleanup
# and other annoying details.
# The tradeoff is that a large dataset must be reloaded once for
# each subprocess.
sub_argv = [
sys.argv[0],
"--poll-interval=%s" % options.poll_interval,
"--max-jobs=1",
"--mongo=%s" % options.mongo,
"--reserve-timeout=%s" % options.reserve_timeout,
]
if options.workdir is not None:
sub_argv.append("--workdir=%s" % options.workdir)
if options.exp_key is not None:
sub_argv.append("--exp-key=%s" % options.exp_key)
proc = subprocess.Popen(sub_argv)
retcode = proc.wait()
proc = None
else:
current_mongo_str = as_mongo_str(options.mongo)
# Remove this if not necessary:
if "/jobs" not in current_mongo_str:
current_mongo_str += "/jobs"
mj = MongoJobs.new_from_connection_str(current_mongo_str)
mworker = MongoWorker(
mj,
float(options.poll_interval),
workdir=options.workdir,
exp_key=options.exp_key,
)
mworker.run_one(reserve_timeout=float(options.reserve_timeout))
retcode = 0
except Shutdown:
# this is the normal way to stop the infinite loop (if originally N=-1)
if proc:
# proc.terminate() is only available as of 2.6
os.kill(
proc.pid, signal.CTRL_C_EVENT if is_windows else signal.SIGTERM
)
return proc.wait()
return 0
except WaitQuit:
# -- sending SIGUSR1 to a looping process will cause it to
# break out of the loop after the current subprocess finishes
# normally.
if proc:
return proc.wait()
return 0
if retcode != 0:
cons_errs += 1
else:
cons_errs = 0
N -= 1
logger.info(
"exiting with N=%i after %i consecutive exceptions" % (N, cons_errs)
)
elif N == 1:
# XXX: the name of the jobs collection is a parameter elsewhere,
# so '/jobs' should not be hard-coded here
mj = MongoJobs.new_from_connection_str(as_mongo_str(options.mongo) + "/jobs")
mworker = MongoWorker(
mj,
float(options.poll_interval),
workdir=options.workdir,
exp_key=options.exp_key,
)
mworker.run_one(reserve_timeout=float(options.reserve_timeout))
else:
raise ValueError("N <= 0")
def main():
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
sys.exit(main_worker())
def main_worker():
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option(
"--exp-key",
dest="exp_key",
default=None,
metavar="str",
help="identifier for this workers's jobs",
)
parser.add_option(
"--last-job-timeout",
dest="last_job_timeout",
metavar="T",
default=None,
help="Do not reserve a job after T seconds have passed",
)
parser.add_option(
"--max-consecutive-failures",
dest="max_consecutive_failures",
metavar="N",
default=4,
help="stop if N consecutive jobs fail (default: 4)",
)
parser.add_option(
"--max-jobs",
dest="max_jobs",
default=sys.maxsize,
help="stop after running this many jobs (default: inf)",
)
parser.add_option(
"--mongo",
dest="mongo",
default="localhost/hyperopt",
help="<host>[:port]/<db> for IPC and job storage",
)
parser.add_option(
"--poll-interval",
dest="poll_interval",
metavar="N",
default=5,
help="check work queue every 1 < T < N seconds (default: 5",
)
parser.add_option(
"--reserve-timeout",
dest="reserve_timeout",
metavar="T",
default=120.0,
help="poll database for up to T seconds to reserve a job",
)
parser.add_option(
"--workdir",
dest="workdir",
default=None,
help="root workdir (default: load from mongo)",
metavar="DIR",
)
parser.add_option(
"--no-subprocesses",
dest="use_subprocesses",
default=True,
action="store_false",
help="do not use sub-processes for each objective evaluation, the objective function will run in the same "
"python process (useful to keep in memory large data across objective evals) but you have to pay "
"attention to memory leaks (default: False)",
)
parser.add_option(
"--max-jobs-in-db",
dest="max_jobs_in_db",
default=sys.maxsize,
help="max jobs in db (default: " + str(sys.maxsize) + ")",
)
(options, args) = parser.parse_args()
if args:
parser.print_help()
return -1
return main_worker_helper(options, args)
| 49,421 | 33.976645 | 115 |
py
|
hyperopt
|
hyperopt-master/hyperopt/criteria.py
|
"""Criteria for Bayesian optimization
"""
from past.utils import old_div
import numpy as np
import scipy.stats
def EI_empirical(samples, thresh):
"""Expected Improvement over threshold from samples
(See example usage in EI_gaussian_empirical)
"""
improvement = np.maximum(samples - thresh, 0)
return improvement.mean()
def EI_gaussian_empirical(mean, var, thresh, rng, N):
"""Expected Improvement of Gaussian over threshold
(estimated empirically)
"""
return EI_empirical(rng.standard_normal(N) * np.sqrt(var) + mean, thresh)
def EI_gaussian(mean, var, thresh):
"""Expected Improvement of Gaussian over threshold
(estimated analytically)
"""
sigma = np.sqrt(var)
score = old_div((mean - thresh), sigma)
n = scipy.stats.norm
return sigma * (score * n.cdf(score) + n.pdf(score))
def logEI_gaussian(mean, var, thresh):
"""Return log(EI(mean, var, thresh))
This formula avoids underflow in cdf for
thresh >= mean + 37 * sqrt(var)
"""
assert np.asarray(var).min() >= 0
sigma = np.sqrt(var)
score = old_div((mean - thresh), sigma)
n = scipy.stats.norm
try:
float(mean)
is_scalar = True
except TypeError:
is_scalar = False
if is_scalar:
if score < 0:
pdf = n.logpdf(score)
r = np.exp(np.log(-score) + n.logcdf(score) - pdf)
rval = np.log(sigma) + pdf + np.log1p(-r)
if not np.isfinite(rval):
return -np.inf
else:
return rval
else:
return np.log(sigma) + np.log(score * n.cdf(score) + n.pdf(score))
else:
score = np.asarray(score)
rval = np.zeros_like(score)
olderr = np.seterr(all="ignore")
try:
negs = score < 0
nonnegs = np.logical_not(negs)
negs_score = score[negs]
negs_pdf = n.logpdf(negs_score)
r = np.exp(np.log(-negs_score) + n.logcdf(negs_score) - negs_pdf)
rval[negs] = np.log(sigma[negs]) + negs_pdf + np.log1p(-r)
nonnegs_score = score[nonnegs]
rval[nonnegs] = np.log(sigma[nonnegs]) + np.log(
nonnegs_score * n.cdf(nonnegs_score) + n.pdf(nonnegs_score)
)
rval[np.logical_not(np.isfinite(rval))] = -np.inf
finally:
np.seterr(**olderr)
return rval
def UCB(mean, var, zscore):
"""Upper Confidence Bound
For a model which predicts a Gaussian-distributed outcome, the UCB is
mean + zscore * sqrt(var)
"""
return mean + np.sqrt(var) * zscore
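# -- Illustrative self-check, not part of the original module: assuming a
#    reasonably recent numpy (for default_rng), the analytic EI should roughly
#    agree with the Monte-Carlo estimate, and exp(logEI) should match EI where
#    both are well-conditioned.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _mean, _var, _thresh = 0.0, 1.0, 0.5
    print("analytic EI :", EI_gaussian(_mean, _var, _thresh))
    print("empirical EI:", EI_gaussian_empirical(_mean, _var, _thresh, _rng, 100000))
    print("exp(logEI)  :", np.exp(logEI_gaussian(_mean, _var, _thresh)))
    print("UCB (z=1.96):", UCB(_mean, _var, 1.96))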
# -- flake8
| 2,655 | 26.381443 | 78 |
py
|
hyperopt
|
hyperopt-master/hyperopt/rdists.py
|
"""
Extra distributions to complement scipy.stats
"""
from past.utils import old_div
import numpy as np
import numpy.random as mtrand
import scipy.stats
from scipy.stats import rv_continuous # , rv_discrete
from scipy.stats._continuous_distns import lognorm_gen as scipy_lognorm_gen
class loguniform_gen(rv_continuous):
"""Stats for Y = e^X where X ~ U(low, high)."""
def __init__(self, low=0, high=1):
rv_continuous.__init__(self, a=np.exp(low), b=np.exp(high))
self._low = low
self._high = high
def _rvs(self):
rval = np.exp(mtrand.uniform(self._low, self._high, self._size))
return rval
def _pdf(self, x):
return old_div(1.0, (x * (self._high - self._low)))
def _logpdf(self, x):
return -np.log(x) - np.log(self._high - self._low)
def _cdf(self, x):
return old_div((np.log(x) - self._low), (self._high - self._low))
class lognorm_gen(scipy_lognorm_gen):
def __init__(self, mu, sigma):
self.mu_ = mu
self.s_ = sigma
scipy_lognorm_gen.__init__(self)
# I still don't understand what scipy stats objects are doing
# re: this stuff
del self.__dict__["_parse_args"]
del self.__dict__["_parse_args_stats"]
del self.__dict__["_parse_args_rvs"]
def _parse_args(self, *args, **kwargs):
assert not args, args
assert not kwargs, kwargs
args = (self.s_,)
loc = 0
scale = np.exp(self.mu_)
return args, loc, scale
def qtable_pmf(x, q, qlow, xs, ps):
qx = np.round(old_div(np.atleast_1d(x).astype(float), q)) * q
is_multiple = np.isclose(qx, x)
ix = np.round(old_div((qx - qlow), q)).astype(int)
is_inbounds = np.logical_and(ix >= 0, ix < len(ps))
oks = np.logical_and(is_multiple, is_inbounds)
rval = np.zeros_like(qx)
rval[oks] = np.asarray(ps)[ix[oks]]
if isinstance(x, np.ndarray):
return rval.reshape(x.shape)
return float(rval)
def qtable_logpmf(x, q, qlow, xs, ps):
p = qtable_pmf(np.atleast_1d(x), q, qlow, xs, ps)
# -- this if/else avoids np warning about underflow
rval = np.zeros_like(p)
rval[p == 0] = -np.inf
rval[p != 0] = np.log(p[p != 0])
if isinstance(x, np.ndarray):
return rval
return float(rval)
class quniform_gen:
# -- not inheriting from scipy.stats.rv_discrete
# because I don't understand the design of those rv classes
"""Stats for Y = q * round(X / q) where X ~ U(low, high)."""
def __init__(self, low, high, q):
low, high = list(map(float, (low, high)))
qlow = safe_int_cast(np.round(old_div(low, q))) * q
qhigh = safe_int_cast(np.round(old_div(high, q))) * q
if qlow == qhigh:
xs = [qlow]
ps = [1.0]
else:
lowmass = 1 - (old_div((low - qlow + 0.5 * q), q))
assert 0 <= lowmass <= 1.0, (lowmass, low, qlow, q)
highmass = old_div((high - qhigh + 0.5 * q), q)
assert 0 <= highmass <= 1.0, (highmass, high, qhigh, q)
# -- xs: qlow to qhigh inclusive
xs = np.arange(qlow, qhigh + 0.5 * q, q)
ps = np.ones(len(xs))
ps[0] = lowmass
ps[-1] = highmass
ps /= ps.sum()
self.low = low
self.high = high
self.q = q
self.qlow = qlow
self.qhigh = qhigh
self.xs = np.asarray(xs)
self.ps = np.asarray(ps)
def pmf(self, x):
return qtable_pmf(x, self.q, self.qlow, self.xs, self.ps)
def logpmf(self, x):
return qtable_logpmf(x, self.q, self.qlow, self.xs, self.ps)
def rvs(self, size=()):
rval = mtrand.uniform(low=self.low, high=self.high, size=size)
rval = safe_int_cast(np.round(old_div(rval, self.q))) * self.q
return rval
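# -- Illustrative usage sketch (values worked out from the class above, not part
#    of the original module): the probability mass of a quniform_gen sums to 1
#    over its support, and off-grid points get zero mass.
#      rv = quniform_gen(low=0, high=10, q=2)
#      rv.xs                -> array([ 0.,  2.,  4.,  6.,  8., 10.])
#      rv.pmf(rv.xs).sum()  -> 1.0 (up to floating-point error)
#      rv.pmf(3)            -> 0.0 (3 is not a multiple of q)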
class qloguniform_gen(quniform_gen):
"""Stats for Y = q * round(e^X / q) where X ~ U(low, high)."""
# -- not inheriting from scipy.stats.rv_discrete
# because I don't understand the design of those rv classes
def __init__(self, low, high, q):
low, high = list(map(float, (low, high)))
elow = np.exp(low)
ehigh = np.exp(high)
qlow = safe_int_cast(np.round(old_div(elow, q))) * q
qhigh = safe_int_cast(np.round(old_div(ehigh, q))) * q
# -- loguniform for using the CDF
lu = loguniform_gen(low=low, high=high)
cut_low = np.exp(low) # -- lowest possible pre-round value
        cut_high = min(qlow + 0.5 * q, ehigh)  # -- highest value that would round to qlow
xs = [qlow]
ps = [lu.cdf(cut_high)]
ii = 0
cdf_high = ps[0]
while cut_high < (ehigh - 1e-10):
# TODO: cut_low never used
cut_high, cut_low = min(cut_high + q, ehigh), cut_high
cdf_high, cdf_low = lu.cdf(cut_high), cdf_high
ii += 1
xs.append(qlow + ii * q)
ps.append(cdf_high - cdf_low)
ps = np.asarray(ps)
ps /= ps.sum()
self.low = low
self.high = high
self.q = q
self.qlow = qlow
self.qhigh = qhigh
self.xs = np.asarray(xs)
self.ps = ps
def pmf(self, x):
return qtable_pmf(x, self.q, self.qlow, self.xs, self.ps)
def logpmf(self, x):
return qtable_logpmf(x, self.q, self.qlow, self.xs, self.ps)
def rvs(self, size=()):
x = mtrand.uniform(low=self.low, high=self.high, size=size)
rval = safe_int_cast(np.round(old_div(np.exp(x), self.q))) * self.q
return rval
class qnormal_gen:
"""Stats for Y = q * round(X / q) where X ~ N(mu, sigma)"""
def __init__(self, mu, sigma, q):
self.mu, self.sigma = list(map(float, (mu, sigma)))
self.q = q
# -- distfn for using the CDF
self._norm_logcdf = scipy.stats.norm(loc=mu, scale=sigma).logcdf
def in_domain(self, x):
return np.isclose(x, safe_int_cast(np.round(old_div(x, self.q))) * self.q)
def pmf(self, x):
return np.exp(self.logpmf(x))
def logpmf(self, x):
x1 = np.atleast_1d(x)
in_domain = self.in_domain(x1)
rval = np.zeros_like(x1, dtype=float) - np.inf
x_in_domain = x1[in_domain]
ubound = x_in_domain + self.q * 0.5
lbound = x_in_domain - self.q * 0.5
# -- reflect intervals right of mu to other side
# for more accurate calculation
flip = lbound > self.mu
tmp = lbound[flip].copy()
lbound[flip] = self.mu - (ubound[flip] - self.mu)
ubound[flip] = self.mu - (tmp - self.mu)
assert np.all(ubound > lbound)
a = self._norm_logcdf(ubound)
b = self._norm_logcdf(lbound)
rval[in_domain] = a + np.log1p(-np.exp(b - a))
if isinstance(x, np.ndarray):
return rval
return float(rval)
def rvs(self, size=()):
x = mtrand.normal(loc=self.mu, scale=self.sigma, size=size)
rval = safe_int_cast(np.round(old_div(x, self.q))) * self.q
return rval
class qlognormal_gen:
"""Stats for Y = q * round(exp(X) / q) where X ~ N(mu, sigma)"""
def __init__(self, mu, sigma, q):
self.mu, self.sigma = list(map(float, (mu, sigma)))
self.q = q
# -- distfn for using the CDF
self._norm_cdf = scipy.stats.norm(loc=mu, scale=sigma).cdf
def in_domain(self, x):
return np.logical_and(
(x >= 0),
np.isclose(x, safe_int_cast(np.round(old_div(x, self.q))) * self.q),
)
def pmf(self, x):
x1 = np.atleast_1d(x)
in_domain = self.in_domain(x1)
x1_in_domain = x1[in_domain]
rval = np.zeros_like(x1, dtype=float)
rval_in_domain = self._norm_cdf(np.log(x1_in_domain + 0.5 * self.q))
rval_in_domain[x1_in_domain != 0] -= self._norm_cdf(
np.log(x1_in_domain[x1_in_domain != 0] - 0.5 * self.q)
)
rval[in_domain] = rval_in_domain
if isinstance(x, np.ndarray):
return rval
return float(rval)
def logpmf(self, x):
pmf = self.pmf(np.atleast_1d(x))
assert np.all(pmf >= 0)
pmf[pmf == 0] = -np.inf
pmf[pmf > 0] = np.log(pmf[pmf > 0])
if isinstance(x, np.ndarray):
return pmf
return float(pmf)
def rvs(self, size=()):
x = mtrand.normal(loc=self.mu, scale=self.sigma, size=size)
rval = safe_int_cast(np.round(old_div(np.exp(x), self.q))) * self.q
return rval
def safe_int_cast(obj):
if isinstance(obj, np.ndarray):
return obj.astype("int")
if isinstance(obj, list):
return [int(i) for i in obj]
return int(obj)
# -- non-empty last line for flake8
| 8,791 | 30.740072 | 82 |
py
|
hyperopt
|
hyperopt-master/hyperopt/exceptions.py
|
"""
"""
class BadSearchSpace(Exception):
"""Something is wrong in the description of the search space"""
class DuplicateLabel(BadSearchSpace):
"""A search space included a duplicate label"""
class InvalidTrial(ValueError):
"""Non trial-like object used as Trial"""
def __init__(self, msg, obj):
ValueError.__init__(self, msg + " " + str(obj))
self.obj = obj
class InvalidResultStatus(ValueError):
"""Status of fmin evaluation was not in base.STATUS_STRINGS"""
def __init__(self, result):
ValueError.__init__(self)
self.result = result
class InvalidLoss(ValueError):
"""fmin evaluation returned invalid loss value"""
def __init__(self, result):
ValueError.__init__(self)
self.result = result
class AllTrialsFailed(Exception):
"""All optimization steps have finished with status base.STATUS_FAIL"""
class InvalidAnnotatedParameter(ValueError):
"""fn has a type hint that is not from hp."""
def __init__(self, result):
ValueError.__init__(self)
self.result = result
# -- flake8 doesn't like blank last line
| 1,132 | 21.66 | 75 |
py
|
hyperopt
|
hyperopt-master/hyperopt/plotting.py
|
"""
Functions to visualize an Experiment.
"""
import pickle
try:
unicode = unicode
except NameError:
basestring = (str, bytes)
else:
basestring = basestring
# -- don't import this here because it locks in the backend
# and we want the unittests to be able to set the backend
# TODO: this is really bad style, create a backend plotting
# module for this that defaults to matplotlib.
# import matplotlib.pyplot as plt
import numpy as np
from . import base
from .base import miscs_to_idxs_vals
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
default_status_colors = {
base.STATUS_NEW: "k",
base.STATUS_RUNNING: "g",
base.STATUS_OK: "b",
base.STATUS_FAIL: "r",
}
def main_plot_history(trials, do_show=True, status_colors=None, title="Loss History"):
# -- import here because file-level import is too early
import matplotlib.pyplot as plt
# self is an Experiment
if status_colors is None:
status_colors = default_status_colors
# XXX: show the un-finished or error trials
Ys, colors = zip(
*[
(y, status_colors[s])
for y, s in zip(trials.losses(), trials.statuses())
if y is not None
]
)
plt.scatter(range(len(Ys)), Ys, c=colors)
plt.xlabel("time")
plt.ylabel("loss")
best_err = trials.average_best_error()
print("avg best error:", best_err)
plt.axhline(best_err, c="g")
plt.title(title)
if do_show:
plt.show()
def main_plot_histogram(trials, do_show=True, title="Loss Histogram"):
# -- import here because file-level import is too early
import matplotlib.pyplot as plt
status_colors = default_status_colors
Xs, Ys, Ss, Cs = zip(
*[
(x, y, s, status_colors[s])
for (x, y, s) in zip(trials.specs, trials.losses(), trials.statuses())
if y is not None
]
)
# XXX: deal with ok vs. un-finished vs. error trials
print("Showing Histogram of %i jobs" % len(Ys))
plt.hist(Ys)
plt.xlabel("loss")
plt.ylabel("frequency")
plt.title(title)
if do_show:
plt.show()
def main_plot_vars(
trials,
do_show=True,
fontsize=10,
colorize_best=None,
columns=5,
arrange_by_loss=False,
):
# -- import here because file-level import is too early
import matplotlib.pyplot as plt
idxs, vals = miscs_to_idxs_vals(trials.miscs)
losses = trials.losses()
finite_losses = [y for y in losses if y not in (None, float("inf"))]
asrt = np.argsort(finite_losses)
if colorize_best is not None:
colorize_thresh = finite_losses[asrt[colorize_best + 1]]
else:
# -- set to lower than best (disabled)
colorize_thresh = finite_losses[asrt[0]] - 1
loss_min = min(finite_losses)
loss_max = max(finite_losses)
print("finite loss range", loss_min, loss_max, colorize_thresh)
loss_by_tid = dict(zip(trials.tids, losses))
def color_fn(lossval):
if lossval is None:
return 1, 1, 1
else:
t = 4 * (lossval - loss_min) / (loss_max - loss_min + 0.0001)
if t < 1:
return t, 0, 0
if t < 2:
return 2 - t, t - 1, 0
if t < 3:
return 0, 3 - t, t - 2
return 0, 0, 4 - t
def color_fn_bw(lossval):
if lossval in (None, float("inf")):
return 1, 1, 1
else:
t = (lossval - loss_min) / (loss_max - loss_min + 0.0001)
if lossval < colorize_thresh:
return 0.0, 1.0 - t, 0.0 # -- red best black worst
else:
return t, t, t # -- white=worst, black=best
all_labels = list(idxs.keys())
titles = all_labels
order = np.argsort(titles)
C = min(columns, len(all_labels))
R = int(np.ceil(len(all_labels) / float(C)))
for plotnum, varnum in enumerate(order):
label = all_labels[varnum]
plt.subplot(R, C, plotnum + 1)
# hide x ticks
ticks_num, ticks_txt = plt.xticks()
plt.xticks(ticks_num, [""] * len(ticks_num))
dist_name = label
if arrange_by_loss:
x = [loss_by_tid[ii] for ii in idxs[label]]
else:
x = idxs[label]
if "log" in dist_name:
y = np.log(vals[label])
else:
y = vals[label]
plt.title(titles[varnum], fontsize=fontsize)
c = list(map(color_fn_bw, [loss_by_tid[ii] for ii in idxs[label]]))
if len(y):
plt.scatter(x, y, c=c)
if "log" in dist_name:
nums, texts = plt.yticks()
plt.yticks(nums, ["%.2e" % np.exp(t) for t in nums])
if do_show:
plt.show()
def main_plot_1D_attachment(
trials,
attachment_name,
do_show=True,
colorize_by_loss=True,
max_darkness=0.5,
num_trails=None,
preprocessing_fn=lambda x: x,
line_width=0.1,
):
"""
Plots trail attachments, which are 1D-Data.
A legend is only added if the number of plotted elements is < 10.
:param trials: The trials object to gather the attachments from.
    :param attachment_name: The name of the attachment to gather.
    :param do_show: If the plot should be shown after creating it.
    :param colorize_by_loss: If the lines representing the trial data should be shaded by loss.
    :param max_darkness: The maximum shading darkness (between 0 and 1); only used when colorize_by_loss=True.
    :param num_trails: The number of trials to plot the attachment for. If None, all trials with a corresponding
    attachment are taken. If set to any integer value, the trials are sorted by loss and selected at regular
    intervals for plotting. This ensures that all possible outcomes are equally represented.
    :param preprocessing_fn: A preprocessing function to be applied to the attachment before plotting.
:param line_width: The width of the lines to be plotted.
:return: None
"""
# -- import here because file-level import is too early
import matplotlib.pyplot as plt
plt.title(attachment_name)
lst = [l for l in trials.losses() if l is not None]
min_loss = min(lst)
max_loss = max(lst)
if num_trails is None:
plotted_trials = trials
else:
trials_by_loss = sorted(
filter(lambda t: "loss" in t["result"], trials),
key=lambda t: t["result"]["loss"],
)
plotted_trials = [
trials_by_loss[i]
for i in np.linspace(
0, len(trials_by_loss), num_trails, endpoint=False, dtype=int
)
]
for trial in plotted_trials:
t_attachments = trials.trial_attachments(trial)
if attachment_name in t_attachments:
attachment_data = np.squeeze(
np.asanyarray(pickle.loads(t_attachments[attachment_name]))
)
if len(attachment_data.shape) == 1:
attachment_data = preprocessing_fn(attachment_data)
if colorize_by_loss:
color = (
0.0,
0.0,
0.0,
max_darkness
* (trial["result"]["loss"] - min_loss)
/ (max_loss - min_loss),
)
else:
color = None
plt.plot(
attachment_data,
color=color,
linewidth=line_width,
label="loss: {:.5}".format(trial["result"]["loss"]),
)
else:
pass # TODO: warn about the skipping
if do_show:
if len(plotted_trials) < 10:
plt.legend()
plt.show()
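# -- Hypothetical usage sketch (the attachment name below is an assumption, not
#    part of the original module): if each trial stored a pickled 1D numpy array
#    under the attachment name "learning_curve", the curves of 20 representative
#    trials could be plotted with
#      main_plot_1D_attachment(trials, "learning_curve", num_trails=20)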
| 7,865 | 29.488372 | 119 |
py
|
hyperopt
|
hyperopt-master/hyperopt/pyll_utils.py
|
from past.builtins import basestring
from functools import partial, wraps
from .base import DuplicateLabel
from .pyll.base import Apply, Literal, MissingArgument
from .pyll import scope
from .pyll import as_apply
def validate_label(f):
@wraps(f)
def wrapper(label, *args, **kwargs):
is_real_string = isinstance(label, basestring)
is_literal_string = isinstance(label, Literal) and isinstance(
label.obj, basestring
)
if not is_real_string and not is_literal_string:
raise TypeError("require string label")
return f(label, *args, **kwargs)
return wrapper
def validate_distribution_range(f):
@wraps(f)
def wrapper(label, *args, **kwargs):
min_val = (
args[0] if len(args) > 0 else (kwargs["low"] if "low" in kwargs else None)
)
max_val = (
args[1] if len(args) > 1 else (kwargs["high"] if "high" in kwargs else None)
)
if min_val and max_val and not min_val < max_val:
raise ValueError(
"low should be less than high: %s is not smaller than %s"
% (min_val, max_val)
)
return f(label, *args, **kwargs)
return wrapper
#
# Hyperparameter Types
#
@scope.define
def hyperopt_param(label, obj):
"""A graph node primarily for annotating - VectorizeHelper looks out
for these guys, and optimizes subgraphs of the form:
hyperopt_param(<stochastic_expression>(...))
"""
return obj
@validate_label
def hp_pchoice(label, p_options):
"""
label: string
p_options: list of (probability, option) pairs
"""
p, options = list(zip(*p_options))
ch = scope.hyperopt_param(label, scope.categorical(p))
return scope.switch(ch, *options)
@validate_label
def hp_choice(label, options):
ch = scope.hyperopt_param(label, scope.randint(len(options)))
return scope.switch(ch, *options)
@validate_label
def hp_randint(label, *args, **kwargs):
return scope.hyperopt_param(label, scope.randint(*args, **kwargs))
@validate_label
@validate_distribution_range
def hp_uniform(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.uniform(*args, **kwargs)))
@validate_label
def hp_uniformint(label, *args, **kwargs):
kwargs["q"] = 1.0
return scope.int(hp_quniform(label, *args, **kwargs))
@validate_label
@validate_distribution_range
def hp_quniform(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.quniform(*args, **kwargs)))
@validate_label
@validate_distribution_range
def hp_loguniform(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.loguniform(*args, **kwargs)))
@validate_label
@validate_distribution_range
def hp_qloguniform(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.qloguniform(*args, **kwargs)))
@validate_label
def hp_normal(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.normal(*args, **kwargs)))
@validate_label
def hp_qnormal(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.qnormal(*args, **kwargs)))
@validate_label
def hp_lognormal(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.lognormal(*args, **kwargs)))
@validate_label
def hp_qlognormal(label, *args, **kwargs):
return scope.float(scope.hyperopt_param(label, scope.qlognormal(*args, **kwargs)))
#
# Tools for extracting a search space from a Pyll graph
#
class Cond:
def __init__(self, name, val, op):
self.op = op
self.name = name
self.val = val
def __str__(self):
return f"Cond{{{self.name} {self.op} {self.val}}}"
def __eq__(self, other):
return self.op == other.op and self.name == other.name and self.val == other.val
def __hash__(self):
return hash((self.op, self.name, self.val))
def __repr__(self):
return str(self)
EQ = partial(Cond, op="=")
def _expr_to_config(expr, conditions, hps):
if expr.name == "switch":
idx = expr.inputs()[0]
options = expr.inputs()[1:]
assert idx.name == "hyperopt_param"
assert idx.arg["obj"].name in (
"randint", # -- in case of hp.choice
"categorical", # -- in case of hp.pchoice
)
_expr_to_config(idx, conditions, hps)
for ii, opt in enumerate(options):
_expr_to_config(opt, conditions + (EQ(idx.arg["label"].obj, ii),), hps)
elif expr.name == "hyperopt_param":
label = expr.arg["label"].obj
if label in hps:
if hps[label]["node"] != expr.arg["obj"]:
raise DuplicateLabel(label)
hps[label]["conditions"].add(conditions)
else:
hps[label] = {
"node": expr.arg["obj"],
"conditions": {conditions},
"label": label,
}
else:
for ii in expr.inputs():
_expr_to_config(ii, conditions, hps)
def expr_to_config(expr, conditions, hps):
"""
Populate dictionary `hps` with the hyperparameters in pyll graph `expr`
and conditions for participation in the evaluation of `expr`.
Arguments:
expr - a pyll expression root.
conditions - a tuple of conditions (`Cond`) that must be True for
`expr` to be evaluated.
hps - dictionary to populate
Creates `hps` dictionary:
label -> { 'node': apply node of hyperparameter distribution,
'conditions': `conditions` + tuple,
'label': label
}
"""
expr = as_apply(expr)
if conditions is None:
conditions = ()
assert isinstance(expr, Apply)
_expr_to_config(expr, conditions, hps)
_remove_allpaths(hps, conditions)
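# -- Illustrative sketch of the `hps` dictionary produced above (schematic, not
#    a literal repr): for a space such as
#      space = hp_choice("kind", [{"kind": "a", "x": hp_uniform("x", 0, 1)},
#                                 {"kind": "b"}])
#    calling expr_to_config(space, (), hps) populates roughly
#      hps["kind"] -> {"node": <randint>, "conditions": {()}, "label": "kind"}
#      hps["x"]    -> {"node": <uniform>, "conditions": {(EQ("kind", 0),)},
#                      "label": "x"}
#    i.e. "x" only participates when the choice labelled "kind" takes value 0.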
def _remove_allpaths(hps, conditions):
"""Hacky way to recognize some kinds of false dependencies
Better would be logic programming.
"""
potential_conds = {}
for k, v in list(hps.items()):
if v["node"].name == "randint":
low = v["node"].arg["low"].obj
# if high is None, the domain is [0, low), else it is [low, high)
domain_size = (
v["node"].arg["high"].obj - low
if v["node"].arg["high"] != MissingArgument
else low
)
potential_conds[k] = frozenset([EQ(k, ii) for ii in range(domain_size)])
elif v["node"].name == "categorical":
p = v["node"].arg["p"].obj
potential_conds[k] = frozenset([EQ(k, ii) for ii in range(p.size)])
for k, v in list(hps.items()):
if len(v["conditions"]) > 1:
all_conds = [[c for c in cond if c is not True] for cond in v["conditions"]]
all_conds = [cond for cond in all_conds if len(cond) >= 1]
if len(all_conds) == 0:
v["conditions"] = {conditions}
continue
depvar = all_conds[0][0].name
all_one_var = all(
len(cond) == 1 and cond[0].name == depvar for cond in all_conds
)
if all_one_var:
conds = [cond[0] for cond in all_conds]
if frozenset(conds) == potential_conds[depvar]:
v["conditions"] = {conditions}
continue
# -- eof
| 7,429 | 28.601594 | 88 |
py
|
hyperopt
|
hyperopt-master/hyperopt/progress.py
|
"""
Progress is reported using context managers.
A progress context manager takes an `initial` and a `total` argument
and should yield an object with an `update(n)` method.
"""
import contextlib
from tqdm import tqdm
from .std_out_err_redirect_tqdm import std_out_err_redirect_tqdm
@contextlib.contextmanager
def tqdm_progress_callback(initial, total):
with std_out_err_redirect_tqdm() as wrapped_stdout, tqdm(
total=total,
file=wrapped_stdout,
postfix={"best loss": "?"},
disable=False,
dynamic_ncols=True,
unit="trial",
initial=initial,
) as pbar:
yield pbar
@contextlib.contextmanager
def no_progress_callback(initial, total):
class NoProgressContext:
def update(self, n):
pass
yield NoProgressContext()
default_callback = tqdm_progress_callback
"""Use tqdm for progress by default"""
| 898 | 22.051282 | 68 |
py
|
hyperopt
|
hyperopt-master/hyperopt/vectorize.py
|
import sys
import numpy as np
from .pyll import Apply
from .pyll import as_apply
from .pyll import dfs
from .pyll import toposort
from .pyll import scope
from .pyll import stochastic
stoch = stochastic.implicit_stochastic_symbols
def ERR(msg):
print("hyperopt.vectorize.ERR", msg, file=sys.stderr)
@scope.define_pure
def vchoice_split(idxs, choices, n_options):
rval = [[] for ii in range(n_options)]
if len(idxs) != len(choices):
raise ValueError("idxs and choices different len", (len(idxs), len(choices)))
for ii, cc in zip(idxs, choices):
rval[cc].append(ii)
return rval
@scope.define_pure
def vchoice_merge(idxs, choices, *vals):
rval = []
assert len(idxs) == len(choices)
for idx, ch in zip(idxs, choices):
vi, vv = vals[ch]
rval.append(vv[list(vi).index(idx)])
return rval
@scope.define_pure
def idxs_map(idxs, cmd, *args, **kwargs):
"""
Return the cmd applied at positions idxs, by retrieving args and kwargs
from the (idxs, vals) pair elements of `args` and `kwargs`.
N.B. args and kwargs may generally include information for more idx values
than are requested by idxs.
"""
# XXX: consider insisting on sorted idxs
# XXX: use np.searchsorted instead of dct
if 0: # these should all be true, but evaluating them is slow
for ii, (idxs_ii, vals_ii) in enumerate(args):
for jj in idxs:
assert jj in idxs_ii
for kw, (idxs_kw, vals_kw) in list(kwargs.items()):
for jj in idxs:
assert jj in idxs_kw
args_imap = []
for idxs_j, vals_j in args:
d = dict(list(zip(idxs_j, vals_j))) if len(idxs_j) else {}
args_imap.append(d)
kwargs_imap = {}
for kw, (idxs_j, vals_j) in list(kwargs.items()):
if len(idxs_j):
kwargs_imap[kw] = dict(list(zip(idxs_j, vals_j)))
else:
kwargs_imap[kw] = {}
f = scope._impls[cmd]
rval = []
for ii in idxs:
try:
args_nn = [arg_imap[ii] for arg_imap in args_imap]
except:
ERR("args_nn %s" % cmd)
ERR("ii %s" % ii)
ERR("arg_imap %s" % str(args_imap))
ERR("args_imap %s" % str(args_imap))
raise
try:
kwargs_nn = {kw: arg_imap[ii] for kw, arg_imap in list(kwargs_imap.items())}
except:
ERR("args_nn %s" % cmd)
ERR("ii %s" % ii)
ERR("kw %s" % kw)
ERR("arg_imap %s" % str(args_imap))
raise
try:
rval_nn = f(*args_nn, **kwargs_nn)
except:
ERR("error calling impl of %s" % cmd)
raise
rval.append(rval_nn)
return rval
@scope.define_pure
def idxs_take(idxs, vals, which):
"""
Return `vals[which]` where `which` is a subset of `idxs`
"""
# TODO: consider insisting on sorted idxs
# TODO: use np.searchsorted instead of dct
assert len(idxs) == len(vals)
table = dict(list(zip(idxs, vals)))
return np.asarray([table[w] for w in which])
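# -- Illustrative behaviour of idxs_take (values below are hypothetical):
#      idxs_take([10, 11, 12], ["a", "b", "c"], which=[12, 10])
#      -> array(["c", "a"])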
@scope.define_pure
def uniq(lst):
s = set()
rval = []
for l in lst:
if id(l) not in s:
s.add(id(l))
rval.append(l)
return rval
def vectorize_stochastic(orig):
if orig.name == "idxs_map" and orig.pos_args[1]._obj in stoch:
# -- this is an idxs_map of a random draw of distribution `dist`
idxs = orig.pos_args[0]
dist = orig.pos_args[1]._obj
def foo(arg):
# -- each argument is an idxs, vals pair
assert arg.name == "pos_args"
assert len(arg.pos_args) == 2
arg_vals = arg.pos_args[1]
# XXX: write a pattern-substitution rule for this case
if arg_vals.name == "idxs_take":
if arg_vals.arg["vals"].name == "asarray":
if arg_vals.arg["vals"].inputs()[0].name == "repeat":
# -- draws are iid, so forget about
# repeating the distribution parameters
repeated_thing = arg_vals.arg["vals"].inputs()[0].inputs()[1]
return repeated_thing
if arg.pos_args[0] is idxs:
return arg_vals
else:
# -- arg.pos_args[0] is a superset of idxs
# TODO: slice out correct elements using
# idxs_take, but more importantly - test this case.
raise NotImplementedError()
new_pos_args = [foo(arg) for arg in orig.pos_args[2:]]
new_named_args = [[aname, foo(arg)] for aname, arg in orig.named_args]
vnode = Apply(dist, new_pos_args, new_named_args, o_len=None)
n_times = scope.len(idxs)
if "size" in dict(vnode.named_args):
raise NotImplementedError("random node already has size")
vnode.named_args.append(["size", n_times])
return vnode
else:
return orig
def replace_repeat_stochastic(expr, return_memo=False):
nodes = dfs(expr)
memo = {}
for ii, orig in enumerate(nodes):
if orig.name == "idxs_map" and orig.pos_args[1]._obj in stoch:
# -- this is an idxs_map of a random draw of distribution `dist`
idxs = orig.pos_args[0]
dist = orig.pos_args[1]._obj
def foo(arg):
# -- each argument is an idxs, vals pair
assert arg.name == "pos_args"
assert len(arg.pos_args) == 2
arg_vals = arg.pos_args[1]
if arg_vals.name == "asarray" and arg_vals.inputs()[0].name == "repeat":
# -- draws are iid, so forget about
# repeating the distribution parameters
repeated_thing = arg_vals.inputs()[0].inputs()[1]
return repeated_thing
else:
if arg.pos_args[0] is idxs:
return arg_vals
# -- arg.pos_args[0] is a superset of idxs
# TODO: slice out correct elements using
# idxs_take, but more importantly - test this case.
raise NotImplementedError()
new_pos_args = [foo(arg) for arg in orig.pos_args[2:]]
new_named_args = [[aname, foo(arg)] for aname, arg in orig.named_args]
vnode = Apply(dist, new_pos_args, new_named_args, None)
n_times = scope.len(idxs)
if "size" in dict(vnode.named_args):
raise NotImplementedError("random node already has size")
vnode.named_args.append(["size", n_times])
# -- loop over all nodes that *use* this one, and change them
for client in nodes[ii + 1 :]:
client.replace_input(orig, vnode)
if expr is orig:
expr = vnode
memo[orig] = vnode
if return_memo:
return expr, memo
return expr
class VectorizeHelper:
"""
Convert a pyll expression representing a single trial into a pyll
expression representing multiple trials.
The resulting multi-trial expression is not meant to be evaluated
directly. It is meant to serve as the input to a suggest algo.
idxs_memo - node in expr graph -> all elements we might need for it
take_memo - node in expr graph -> all exprs retrieving computed elements
"""
def __init__(self, expr, expr_idxs, build=True):
self.expr = expr
self.expr_idxs = expr_idxs
self.dfs_nodes = dfs(expr)
self.params = {}
for ii, node in enumerate(self.dfs_nodes):
if node.name == "hyperopt_param":
label = node.arg["label"].obj
self.params[label] = node.arg["obj"]
# -- recursive construction
# This makes one term in each idxs, vals memo for every
# directed path through the switches in the graph.
self.idxs_memo = {} # node -> union, all idxs computed
self.take_memo = {} # node -> list of idxs_take retrieving node vals
self.v_expr = self.build_idxs_vals(expr, expr_idxs)
# TODO: graph-optimization pass to remove cruft:
# - unions of 1
# - unions of full sets with their subsets
# - idxs_take that can be merged
self.assert_integrity_idxs_take()
def assert_integrity_idxs_take(self):
idxs_memo = self.idxs_memo
take_memo = self.take_memo
after = dfs(self.expr)
assert after == self.dfs_nodes
assert set(idxs_memo.keys()) == set(take_memo.keys())
for node in idxs_memo:
idxs = idxs_memo[node]
assert idxs.name == "array_union"
vals = take_memo[node][0].pos_args[1]
for take in take_memo[node]:
assert take.name == "idxs_take"
assert [idxs, vals] == take.pos_args[:2]
def build_idxs_vals(self, node, wanted_idxs):
"""
This recursive procedure should be called on an output-node.
"""
checkpoint_asserts = False
def checkpoint():
if checkpoint_asserts:
self.assert_integrity_idxs_take()
if node in self.idxs_memo:
toposort(self.idxs_memo[node])
if node in self.take_memo:
for take in self.take_memo[node]:
toposort(take)
checkpoint()
# wanted_idxs are fixed, whereas idxs_memo
# is full of unions, that can grow in subsequent recursive
# calls to build_idxs_vals with node as argument.
assert wanted_idxs != self.idxs_memo.get(node)
# -- easy exit case
if node.name == "hyperopt_param":
# -- ignore, not vectorizing
return self.build_idxs_vals(node.arg["obj"], wanted_idxs)
# -- easy exit case
elif node.name == "hyperopt_result":
# -- ignore, not vectorizing
return self.build_idxs_vals(node.arg["obj"], wanted_idxs)
# -- literal case: always take from universal set
elif node.name == "literal":
if node in self.idxs_memo:
all_idxs, all_vals = self.take_memo[node][0].pos_args[:2]
wanted_vals = scope.idxs_take(all_idxs, all_vals, wanted_idxs)
self.take_memo[node].append(wanted_vals)
checkpoint()
else:
# -- initialize idxs_memo to full set
all_idxs = self.expr_idxs
n_times = scope.len(all_idxs)
# -- put array_union into graph for consistency, though it is
# not necessary
all_idxs = scope.array_union(all_idxs)
self.idxs_memo[node] = all_idxs
all_vals = scope.asarray(scope.repeat(n_times, node))
wanted_vals = scope.idxs_take(all_idxs, all_vals, wanted_idxs)
assert node not in self.take_memo
self.take_memo[node] = [wanted_vals]
checkpoint()
return wanted_vals
# -- switch case: complicated
elif node.name == "switch":
if node in self.idxs_memo and wanted_idxs in self.idxs_memo[node].pos_args:
# -- phew, easy case
all_idxs, all_vals = self.take_memo[node][0].pos_args[:2]
wanted_vals = scope.idxs_take(all_idxs, all_vals, wanted_idxs)
self.take_memo[node].append(wanted_vals)
checkpoint()
else:
# -- we need to add some indexes
if node in self.idxs_memo:
all_idxs = self.idxs_memo[node]
assert all_idxs.name == "array_union"
all_idxs.pos_args.append(wanted_idxs)
else:
all_idxs = scope.array_union(wanted_idxs)
choice = node.pos_args[0]
all_choices = self.build_idxs_vals(choice, all_idxs)
options = node.pos_args[1:]
args_idxs = scope.vchoice_split(all_idxs, all_choices, len(options))
all_vals = scope.vchoice_merge(all_idxs, all_choices)
for opt_ii, idxs_ii in zip(options, args_idxs):
all_vals.pos_args.append(
as_apply([idxs_ii, self.build_idxs_vals(opt_ii, idxs_ii)])
)
wanted_vals = scope.idxs_take(
all_idxs, # -- may grow in future
all_vals, # -- may be replaced in future
wanted_idxs,
) # -- fixed.
if node in self.idxs_memo:
assert self.idxs_memo[node].name == "array_union"
self.idxs_memo[node].pos_args.append(wanted_idxs)
for take in self.take_memo[node]:
assert take.name == "idxs_take"
take.pos_args[1] = all_vals
self.take_memo[node].append(wanted_vals)
else:
self.idxs_memo[node] = all_idxs
self.take_memo[node] = [wanted_vals]
checkpoint()
# -- general case
else:
# -- this is a general node.
# It is generally handled with idxs_memo,
# but vectorize_stochastic may immediately transform it into
# a more compact form.
if node in self.idxs_memo and wanted_idxs in self.idxs_memo[node].pos_args:
# -- phew, easy case
for take in self.take_memo[node]:
if take.pos_args[2] == wanted_idxs:
return take
raise NotImplementedError("how did this happen?")
# all_idxs, all_vals = self.take_memo[node][0].pos_args[:2]
# wanted_vals = scope.idxs_take(all_idxs, all_vals, wanted_idxs)
# self.take_memo[node].append(wanted_vals)
# checkpoint()
else:
# XXX
# -- determine if wanted_idxs is actually a subset of the idxs
# that we are already computing. This is not only an
# optimization, but prevents the creation of cycles, which
# would otherwise occur if we have a graph of the form
# switch(f(a), g(a), 0). If there are other switches inside f
# and g, does this get trickier?
# -- assume we need to add some indexes
checkpoint()
if node in self.idxs_memo:
all_idxs = self.idxs_memo[node]
else:
all_idxs = scope.array_union(wanted_idxs)
checkpoint()
all_vals = scope.idxs_map(all_idxs, node.name)
for ii, aa in enumerate(node.pos_args):
all_vals.pos_args.append(
as_apply([all_idxs, self.build_idxs_vals(aa, all_idxs)])
)
checkpoint()
for ii, (nn, aa) in enumerate(node.named_args):
all_vals.named_args.append(
[nn, as_apply([all_idxs, self.build_idxs_vals(aa, all_idxs)])]
)
checkpoint()
all_vals = vectorize_stochastic(all_vals)
checkpoint()
wanted_vals = scope.idxs_take(
all_idxs, # -- may grow in future
all_vals, # -- may be replaced in future
wanted_idxs,
) # -- fixed.
if node in self.idxs_memo:
assert self.idxs_memo[node].name == "array_union"
self.idxs_memo[node].pos_args.append(wanted_idxs)
toposort(self.idxs_memo[node])
# -- this catches the cycle bug mentioned above
for take in self.take_memo[node]:
assert take.name == "idxs_take"
take.pos_args[1] = all_vals
self.take_memo[node].append(wanted_vals)
else:
self.idxs_memo[node] = all_idxs
self.take_memo[node] = [wanted_vals]
checkpoint()
return wanted_vals
def idxs_by_label(self):
return {name: self.idxs_memo[node] for name, node in list(self.params.items())}
def vals_by_label(self):
return {
name: self.take_memo[node][0].pos_args[1]
for name, node in list(self.params.items())
}
| 16,760 | 37.619816 | 88 |
py
|
hyperopt
|
hyperopt-master/hyperopt/utils.py
|
from future import standard_library
from past.builtins import basestring
from past.utils import old_div
import datetime
import numpy as np
import logging
import os
import shutil
import sys
import uuid
import numpy
from . import pyll
from contextlib import contextmanager
standard_library.install_aliases()
def _get_random_id():
"""
Generates a random ID.
"""
return uuid.uuid4().hex[-12:]
def _get_logger(name):
"""Gets a logger by name, or creates and configures it for the first time."""
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
# If the logger is configured, skip the configure
if not logger.handlers and not logging.getLogger().handlers:
handler = logging.StreamHandler(sys.stderr)
logger.addHandler(handler)
return logger
logger = _get_logger(__name__)
try:
import cloudpickle as pickler
except Exception as e:
logger.info(
'Failed to load cloudpickle, try installing cloudpickle via "pip install cloudpickle" for enhanced pickling support.'
)
import pickle as pickler
def import_tokens(tokens):
# XXX Document me
# import as many as we can
rval = None
for i in range(len(tokens)):
modname = ".".join(tokens[: i + 1])
# XXX: try using getattr, and then merge with load_tokens
try:
logger.info("importing %s" % modname)
exec(f"import {modname}")
exec(f"rval = {modname}")
except ImportError as e:
logger.info("failed to import %s" % modname)
logger.info("reason: %s" % str(e))
break
return rval, tokens[i:]
def load_tokens(tokens):
# XXX: merge with import_tokens
logger.info("load_tokens: %s" % str(tokens))
symbol, remainder = import_tokens(tokens)
for attr in remainder:
symbol = getattr(symbol, attr)
return symbol
def json_lookup(json):
symbol = load_tokens(json.split("."))
return symbol
def json_call(json, args=(), kwargs=None):
"""
Return a dataset class instance based on a string, tuple or dictionary
.. code-block:: python
iris = json_call('datasets.toy.Iris')
This function works by parsing the string, and calling import and getattr a
lot. (XXX)
"""
if kwargs is None:
kwargs = {}
if isinstance(json, basestring):
symbol = json_lookup(json)
return symbol(*args, **kwargs)
elif isinstance(json, dict):
raise NotImplementedError("dict calling convention undefined", json)
elif isinstance(json, (tuple, list)):
raise NotImplementedError("seq calling convention undefined", json)
else:
raise TypeError(json)
def get_obj(f, argfile=None, argstr=None, args=(), kwargs=None):
"""
XXX: document me
"""
if kwargs is None:
kwargs = {}
if argfile is not None:
argstr = open(argfile).read()
if argstr is not None:
argd = pickler.loads(argstr)
else:
argd = {}
args = args + argd.get("args", ())
kwargs.update(argd.get("kwargs", {}))
return json_call(f, args=args, kwargs=kwargs)
def pmin_sampled(mean, var, n_samples=1000, rng=None):
"""Probability that each Gaussian-dist R.V. is less than the others
    :param mean: mean vector
:param var: variance vector
This function works by sampling n_samples from every (gaussian) mean distribution,
and counting up the number of times each element's sample is the best.
"""
if rng is None:
rng = numpy.random.default_rng(232342)
samples = rng.standard_normal((n_samples, len(mean))) * numpy.sqrt(var) + mean
winners = (samples.T == samples.min(axis=1)).T
wincounts = winners.sum(axis=0)
assert wincounts.shape == mean.shape
return old_div(wincounts.astype("float64"), wincounts.sum())
def fast_isin(X, Y):
"""
Indices of elements in a numpy array that appear in another.
Fast routine for determining indices of elements in numpy array `X` that
appear in numpy array `Y`, returning a boolean array `Z` such that::
Z[i] = X[i] in Y
"""
if len(Y) > 0:
T = Y.copy()
T.sort()
D = T.searchsorted(X)
T = np.append(T, np.array([0]))
W = T[D] == X
if isinstance(W, bool):
return np.zeros((len(X),), bool)
else:
return T[D] == X
else:
return np.zeros((len(X),), bool)
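# -- Illustrative behaviour of fast_isin (values below are hypothetical):
#      X = np.array([1, 2, 3, 4]); Y = np.array([2, 4, 6])
#      fast_isin(X, Y) -> array([False,  True, False,  True])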
def get_most_recent_inds(obj):
data = numpy.rec.array(
[(x["_id"], int(x["version"])) for x in obj], names=["_id", "version"]
)
s = data.argsort(order=["_id", "version"])
data = data[s]
recent = (data["_id"][1:] != data["_id"][:-1]).nonzero()[0]
recent = numpy.append(recent, [len(data) - 1])
return s[recent]
def use_obj_for_literal_in_memo(expr, obj, lit, memo):
"""
Set `memo[node] = obj` for all nodes in expr such that `node.obj == lit`
This is a useful routine for fmin-compatible functions that are searching
domains that include some leaf nodes that are complicated
runtime-generated objects. One option is to make such leaf nodes pyll
functions, but it can be easier to construct those objects the normal
Python way in the fmin function, and just stick them into the evaluation
memo. The experiment ctrl object itself is inserted using this technique.
"""
for node in pyll.dfs(expr):
try:
if node.obj == lit:
memo[node] = obj
except (AttributeError, ValueError) as e:
# -- non-literal nodes don't have node.obj
pass
return memo
def coarse_utcnow():
"""
# MongoDB stores only to the nearest millisecond
# This is mentioned in a footnote here:
# http://api.mongodb.org/python/current/api/bson/son.html#dt
"""
now = datetime.datetime.utcnow()
microsec = (now.microsecond // 10**3) * (10**3)
return datetime.datetime(
now.year, now.month, now.day, now.hour, now.minute, now.second, microsec
)
@contextmanager
def working_dir(dir):
cwd = os.getcwd()
os.chdir(dir)
yield
os.chdir(cwd)
def path_split_all(path):
"""split a path at all path separaters, return list of parts"""
parts = []
while True:
path, fn = os.path.split(path)
if len(fn) == 0:
break
parts.append(fn)
return reversed(parts)
def get_closest_dir(workdir):
"""
    returns the topmost already-existing directory in the given path;
    erasing work-dirs should never progress above this directory.
    Also returns the name of the first non-existing dir for use as a filename.
"""
closest_dir = ""
for wdi in path_split_all(workdir):
if os.path.isdir(os.path.join(closest_dir, wdi)):
closest_dir = os.path.join(closest_dir, wdi)
else:
break
assert closest_dir != workdir
return closest_dir, wdi
@contextmanager
def temp_dir(dir, erase_after=False, with_sentinel=True):
created_by_me = False
if not os.path.exists(dir):
if os.pardir in dir:
raise RuntimeError("workdir contains os.pardir ('..')")
if erase_after and with_sentinel:
closest_dir, fn = get_closest_dir(dir)
sentinel = os.path.join(closest_dir, fn + ".inuse")
open(sentinel, "w").close()
os.makedirs(dir)
created_by_me = True
else:
assert os.path.isdir(dir)
yield
if erase_after and created_by_me:
# erase all files in workdir
shutil.rmtree(dir)
if with_sentinel:
# put dir back as starting point for recursive remove
os.mkdir(dir)
# also try to erase any other empty directories up to
# sentinel file
os.removedirs(dir)
# remove sentinel file
os.remove(sentinel)
| 7,894 | 27.919414 | 125 |
py
|
hyperopt
|
hyperopt-master/hyperopt/hp.py
|
"""
Support nicer user syntax:
from hyperopt import hp
hp.uniform('x', 0, 1)
"""
from .pyll_utils import hp_choice as choice
from .pyll_utils import hp_randint as randint
from .pyll_utils import hp_pchoice as pchoice
from .pyll_utils import hp_uniform as uniform
from .pyll_utils import hp_uniformint as uniformint
from .pyll_utils import hp_quniform as quniform
from .pyll_utils import hp_loguniform as loguniform
from .pyll_utils import hp_qloguniform as qloguniform
from .pyll_utils import hp_normal as normal
from .pyll_utils import hp_qnormal as qnormal
from .pyll_utils import hp_lognormal as lognormal
from .pyll_utils import hp_qlognormal as qlognormal
| 671 | 32.6 | 53 |
py
|
hyperopt
|
hyperopt-master/hyperopt/graph_viz.py
|
"""
Use graphviz's dot language to express the relationship between hyperparamters
in a search space.
"""
from future import standard_library
import io
from .pyll_utils import expr_to_config
standard_library.install_aliases()
def dot_hyperparameters(expr):
"""
Return a dot language specification of a graph which describes the
relationship between hyperparameters. Each hyperparameter within the
pyll expression `expr` is represented by a rectangular node, and
each value of each choice node that creates a conditional variable
in the search space is represented by an elliptical node.
The direction of the arrows corresponds to the sequence of events
in an ancestral sampling process.
E.g.:
>>> open('foo.dot', 'w').write(dot_hyperparameters(search_space()))
Then later from the shell, type e.g.
dot -Tpng foo.dot > foo.png && eog foo.png
Graphviz has other tools too: http://www.graphviz.org
"""
conditions = ()
hps = {}
expr_to_config(expr, conditions, hps)
rval = io.StringIO()
print("digraph {", file=rval)
edges = set()
def var_node(a):
print('"%s" [ shape=box];' % a, file=rval)
def cond_node(a):
print('"%s" [ shape=ellipse];' % a, file=rval)
def edge(a, b):
text = f'"{a}" -> "{b}";'
if text not in edges:
print(text, file=rval)
edges.add(text)
for hp, dct in list(hps.items()):
# create the node
var_node(hp)
# create an edge from anything it depends on
for and_conds in dct["conditions"]:
if len(and_conds) > 1:
parent_label = " & ".join(
["%(name)s%(op)s%(val)s" % cond.__dict__ for cond in and_conds]
)
cond_node(parent_label)
edge(parent_label, hp)
for cond in and_conds:
sub_parent_label = f"{cond.name}{cond.op}{cond.val}"
cond_node(sub_parent_label)
edge(cond.name, sub_parent_label)
edge(sub_parent_label, parent_label)
elif len(and_conds) == 1:
parent_label = "{}{}{}".format(
and_conds[0].name,
and_conds[0].op,
and_conds[0].val,
)
edge(and_conds[0].name, parent_label)
cond_node(parent_label)
edge(parent_label, hp)
print("}", file=rval)
return rval.getvalue()
| 2,534 | 30.296296 | 83 |
py
|
hyperopt
|
hyperopt-master/hyperopt/spark.py
|
import copy
import threading
import time
import timeit
import traceback
from hyperopt import base, fmin, Trials
from hyperopt.base import validate_timeout, validate_loss_threshold
from hyperopt.utils import coarse_utcnow, _get_logger, _get_random_id
try:
from py4j.clientserver import ClientServer
from pyspark.sql import SparkSession
from pyspark.util import VersionUtils
import pyspark
_have_spark = True
_spark_major_minor_version = VersionUtils.majorMinorVersion(pyspark.__version__)
except ImportError as e:
_have_spark = False
_spark_major_minor_version = None
logger = _get_logger("hyperopt-spark")
class SparkTrials(Trials):
"""
Implementation of hyperopt.Trials supporting
distributed execution using Apache Spark clusters.
This requires fmin to be run on a Spark cluster.
Plugging SparkTrials into hyperopt.fmin() allows hyperopt
to send model training and evaluation tasks to Spark workers,
parallelizing hyperparameter search.
Each trial (set of hyperparameter values) is handled within
a single Spark task; i.e., each model will be fit and evaluated
on a single worker machine. Trials are run asynchronously.
See hyperopt.Trials docs for general information about Trials.
The fields we store in our trial docs match the base Trials class. The fields include:
- 'tid': trial ID
- 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
- 'result': evaluation result for completed trial run
- 'refresh_time': timestamp for last status update
- 'misc': includes:
- 'error': (error type, error message)
- 'book_time': timestamp for trial run start
"""
asynchronous = True
# Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128.
MAX_CONCURRENT_JOBS_ALLOWED = 128
def __init__(
self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
):
"""
:param parallelism: Maximum number of parallel trials to run,
i.e., maximum number of concurrent Spark tasks.
The actual parallelism is subject to available Spark task slots at
runtime.
If set to None (default) or a non-positive value, this will be set to
Spark's default parallelism or `1`.
We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
:param timeout: Maximum time (in seconds) which fmin is allowed to take.
If this timeout is hit, then fmin will cancel running and proposed trials.
It will retain all completed trial runs and return the best result found
so far.
:param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
to use an existing SparkSession or create a new one. SparkSession is
the entry point for various facilities provided by Spark. For more
information, visit the documentation for PySpark.
"""
super().__init__(exp_key=None, refresh=False)
if not _have_spark:
raise Exception(
"SparkTrials cannot import pyspark classes. Make sure that PySpark "
"is available in your environment. E.g., try running 'import pyspark'"
)
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
self._spark = (
SparkSession.builder.getOrCreate()
if spark_session is None
else spark_session
)
self._spark_context = self._spark.sparkContext
self._spark_pinned_threads_enabled = isinstance(
self._spark_context._gateway, ClientServer
)
# The feature to support controlling jobGroupIds is in SPARK-22340
self._spark_supports_job_cancelling = (
self._spark_pinned_threads_enabled
or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup")
)
spark_default_parallelism = self._spark_context.defaultParallelism
self.parallelism = self._decide_parallelism(
requested_parallelism=parallelism,
spark_default_parallelism=spark_default_parallelism,
)
if not self._spark_supports_job_cancelling and timeout is not None:
logger.warning(
"SparkTrials was constructed with a timeout specified, but this Apache "
"Spark version does not support job group-based cancellation. The "
"timeout will be respected when starting new Spark jobs, but "
"SparkTrials will not be able to cancel running Spark jobs which exceed"
" the timeout."
)
self.timeout = timeout
self.loss_threshold = loss_threshold
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
self.refresh()
@staticmethod
def _decide_parallelism(requested_parallelism, spark_default_parallelism):
"""
Given the requested parallelism, return the max parallelism SparkTrials will actually use.
See the docstring for `parallelism` in the constructor for expected behavior.
"""
if requested_parallelism is None or requested_parallelism <= 0:
parallelism = max(spark_default_parallelism, 1)
logger.warning(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
"or 1, whichever is greater. "
"We recommend setting parallelism explicitly to a positive value because "
"the total of Spark task slots is subject to cluster sizing.".format(
d=parallelism, s=spark_default_parallelism
)
)
else:
parallelism = requested_parallelism
if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
logger.warning(
"Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
)
)
parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
return parallelism
def count_successful_trials(self):
"""
Returns the current number of trials which ran successfully
"""
return self.count_by_state_unsynced(base.JOB_STATE_DONE)
def count_failed_trials(self):
"""
Returns the current number of trial runs which failed
"""
return self.count_by_state_unsynced(base.JOB_STATE_ERROR)
def count_cancelled_trials(self):
"""
Returns the current number of cancelled trial runs.
This covers trials which are cancelled from exceeding the timeout.
"""
return self.count_by_state_unsynced(base.JOB_STATE_CANCEL)
def count_total_trials(self):
"""
Returns the current number of all successful, failed, and cancelled trial runs
"""
total_states = [
base.JOB_STATE_DONE,
base.JOB_STATE_ERROR,
base.JOB_STATE_CANCEL,
]
return self.count_by_state_unsynced(total_states)
def delete_all(self):
"""
Reset the Trials to init state
"""
super().delete_all()
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
def trial_attachments(self, trial):
raise NotImplementedError("SparkTrials does not support trial attachments.")
def fmin(
self,
fn,
space,
algo,
max_evals,
timeout,
loss_threshold,
max_queue_len,
rstate,
verbose,
pass_expr_memo_ctrl,
catch_eval_exceptions,
return_argmin,
show_progressbar,
early_stop_fn,
trials_save_file="",
):
"""
This should not be called directly but is called via :func:`hyperopt.fmin`
Refer to :func:`hyperopt.fmin` for docs on each argument
"""
if timeout is not None:
if self.timeout is not None:
logger.warning(
"Timeout param was defined in Trials object, ignoring fmin definition"
)
else:
validate_timeout(timeout)
self.timeout = timeout
if loss_threshold is not None:
validate_loss_threshold(loss_threshold)
self.loss_threshold = loss_threshold
assert (
not pass_expr_memo_ctrl
), "SparkTrials does not support `pass_expr_memo_ctrl`"
assert (
not catch_eval_exceptions
), "SparkTrials does not support `catch_eval_exceptions`"
state = _SparkFMinState(self._spark, fn, space, self)
# Will launch a dispatcher thread which runs each trial task as one spark job.
state.launch_dispatcher()
try:
res = fmin(
fn,
space,
algo,
max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
trials=self,
allow_trials_fmin=False, # -- prevent recursion
rstate=rstate,
pass_expr_memo_ctrl=None, # not supported
catch_eval_exceptions=catch_eval_exceptions,
verbose=verbose,
return_argmin=return_argmin,
points_to_evaluate=None, # not supported
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file="", # not supported
)
except BaseException as e:
logger.debug("fmin thread exits with an exception raised.")
raise e
else:
logger.debug("fmin thread exits normally.")
return res
finally:
state.wait_for_all_threads()
logger.info(
"Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
t=self.count_total_trials(),
s=self.count_successful_trials(),
f=self.count_failed_trials(),
c=self.count_cancelled_trials(),
)
)
class _SparkFMinState:
"""
Class for managing threads which run concurrent Spark jobs.
This maintains a primary dispatcher thread, plus 1 thread per Hyperopt trial.
Each trial's thread runs 1 Spark job with 1 task.
"""
def __init__(self, spark, eval_function, space, trials):
self.spark = spark
self.eval_function = eval_function
self.space = space
self.trials = trials
self._fmin_done = False
self._dispatcher_thread = None
self._task_threads = set()
if self.trials._spark_supports_job_cancelling:
spark_context = spark.sparkContext
self._job_group_id = spark_context.getLocalProperty("spark.jobGroup.id")
self._job_desc = spark_context.getLocalProperty("spark.job.description")
interrupt_on_cancel = spark_context.getLocalProperty(
"spark.job.interruptOnCancel"
)
if interrupt_on_cancel is None:
self._job_interrupt_on_cancel = False
else:
self._job_interrupt_on_cancel = "true" == interrupt_on_cancel.lower()
# In certain Spark deployments, the local property "spark.jobGroup.id"
# value is None, so we create one to use for SparkTrials.
if self._job_group_id is None:
self._job_group_id = "Hyperopt_SparkTrials_" + _get_random_id()
if self._job_desc is None:
self._job_desc = "Trial evaluation jobs launched by hyperopt fmin"
logger.debug(
"Job group id: {g}, job desc: {d}, job interrupt on cancel: {i}".format(
g=self._job_group_id,
d=self._job_desc,
i=self._job_interrupt_on_cancel,
)
)
def running_trial_count(self):
return self.trials.count_by_state_unsynced(base.JOB_STATE_RUNNING)
@staticmethod
def _begin_trial_run(trial):
trial["state"] = base.JOB_STATE_RUNNING
now = coarse_utcnow()
trial["book_time"] = now
trial["refresh_time"] = now
logger.debug("trial task {tid} started".format(tid=trial["tid"]))
@staticmethod
def _get_traceback(err):
return err.__dict__.get("_tb_str")
def _finish_trial_run(self, is_success, is_cancelled, trial, data):
"""
Call this method when a trial evaluation finishes. It will save results to the
trial object and update task counters.
:param is_success: whether the trial succeeded
        :param is_cancelled: whether the trial was cancelled
        :param trial: the trial document to update with the outcome
:param data: If the trial succeeded, this is the return value from the trial
task function. Otherwise, this is the exception raised when running the trial
task.
"""
if is_cancelled:
logger.debug(
"trial task {tid} cancelled, exception is {e}".format(
tid=trial["tid"], e=str(data)
)
)
self._write_cancellation_back(trial, e=data)
elif is_success:
logger.debug(
"trial task {tid} succeeded, result is {r}".format(
tid=trial["tid"], r=data
)
)
self._write_result_back(trial, result=data)
else:
logger.error(
"trial task {tid} failed, exception is {e}.\n {tb}".format(
tid=trial["tid"], e=str(data), tb=self._get_traceback(data)
)
)
self._write_exception_back(trial, e=data)
def launch_dispatcher(self):
def run_dispatcher():
start_time = timeit.default_timer()
while not self._fmin_done:
new_tasks = self._poll_new_tasks()
for trial in new_tasks:
self._run_trial_async(trial)
cur_time = timeit.default_timer()
elapsed_time = cur_time - start_time
# In the future, timeout checking logic could be moved to `fmin`.
# For now, timeouts are specific to SparkTrials.
# When a timeout happens:
# - Set `trials._fmin_cancelled` flag to be True.
# - FMinIter checks this flag and exits if it is set to True.
if (
self.trials.timeout is not None
and elapsed_time > self.trials.timeout
and not self.trials._fmin_cancelled
):
self.trials._fmin_cancelled = True
self.trials._fmin_cancelled_reason = "fmin run timeout"
self._cancel_running_trials()
logger.warning(
"fmin cancelled because of "
+ self.trials._fmin_cancelled_reason
)
time.sleep(1)
if self.trials._fmin_cancelled:
                # Because fmin cancellation has been triggered, warn that the dispatcher
                # won't launch more trial tasks.
logger.warning("fmin is cancelled, so new trials will not be launched.")
logger.debug("dispatcher thread exits normally.")
self._dispatcher_thread = threading.Thread(target=run_dispatcher)
        self._dispatcher_thread.daemon = True
self._dispatcher_thread.start()
@staticmethod
def _get_spec_from_trial(trial):
return base.spec_from_misc(trial["misc"])
@staticmethod
def _write_result_back(trial, result):
trial["state"] = base.JOB_STATE_DONE
trial["result"] = result
trial["refresh_time"] = coarse_utcnow()
def _write_exception_back(self, trial, e):
trial["state"] = base.JOB_STATE_ERROR
trial["misc"]["error"] = (str(type(e)), self._get_traceback(e))
trial["refresh_time"] = coarse_utcnow()
@staticmethod
def _write_cancellation_back(trial, e):
trial["state"] = base.JOB_STATE_CANCEL
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
def _run_trial_async(self, trial):
def finish_trial_run(result_or_e):
if not isinstance(result_or_e, BaseException):
self._finish_trial_run(
is_success=True,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread exits normally and writes results "
"back correctly.".format(tid=trial["tid"])
)
else:
self._finish_trial_run(
is_success=False,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread catches an exception and writes the "
"info back correctly.".format(tid=trial["tid"])
)
def run_task_thread():
local_eval_function, local_space = self.eval_function, self.space
params = self._get_spec_from_trial(trial)
def run_task_on_executor(_):
domain = base.Domain(
local_eval_function, local_space, pass_expr_memo_ctrl=None
)
try:
result = domain.evaluate(
params, ctrl=None, attach_attachments=False
)
yield result
except BaseException as e:
                    # Because the traceback is not picklable, we need to format it and
                    # pass it back to the driver
_traceback_string = traceback.format_exc()
logger.error(_traceback_string)
e._tb_str = _traceback_string
yield e
try:
worker_rdd = self.spark.sparkContext.parallelize([0], 1)
if self.trials._spark_supports_job_cancelling:
if self.trials._spark_pinned_threads_enabled:
spark_context = self.spark.sparkContext
spark_context.setLocalProperty(
"spark.jobGroup.id", self._job_group_id
)
spark_context.setLocalProperty(
"spark.job.description", self._job_desc
)
spark_context.setLocalProperty(
"spark.job.interruptOnCancel",
str(self._job_interrupt_on_cancel).lower(),
)
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collectWithJobGroup(
self._job_group_id,
self._job_desc,
self._job_interrupt_on_cancel,
)[
0
]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
except BaseException as e:
                # Catch all exceptions here; there are several possible reasons an
                # exception can be raised at this point, which is why we use
                # `except BaseException`. It makes the program more robust.
                #
                # If the cancelled flag is set, all running tasks need to be cancelled;
                # otherwise, the task itself failed.
finish_trial_run(e)
else:
                # Exceptions captured in run_task_on_executor are returned in result_or_e
finish_trial_run(result_or_e)
if self.trials._spark_pinned_threads_enabled:
try:
# pylint: disable=no-name-in-module,import-outside-toplevel
from pyspark import inheritable_thread_target
run_task_thread = inheritable_thread_target(run_task_thread)
except ImportError:
pass
task_thread = threading.Thread(target=run_task_thread)
        task_thread.daemon = True
task_thread.start()
self._task_threads.add(task_thread)
def _poll_new_tasks(self):
new_task_list = []
for trial in copy.copy(self.trials.trials):
if trial["state"] == base.JOB_STATE_NEW:
# check parallelism limit
if self.running_trial_count() >= self.trials.parallelism:
break
new_task_list.append(trial)
self._begin_trial_run(trial)
return new_task_list
def _cancel_running_trials(self):
if self.trials._spark_supports_job_cancelling:
logger.debug(
"Cancelling all running jobs in job group {g}".format(
g=self._job_group_id
)
)
self.spark.sparkContext.cancelJobGroup(self._job_group_id)
# Make a copy of trials by slicing
for trial in self.trials.trials[:]:
if trial["state"] in [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]:
trial["state"] = base.JOB_STATE_CANCEL
else:
logger.info(
"Because the current Apache PySpark version does not support "
"cancelling jobs by job group ID, SparkTrials will block until all of "
"its running Spark jobs finish."
)
def wait_for_all_threads(self):
"""
Wait for the dispatcher and worker threads to finish.
"""
self._fmin_done = True
self._dispatcher_thread.join()
self._dispatcher_thread = None
for task_thread in self._task_threads:
task_thread.join()
self._task_threads.clear()
| 23,095 | 39.02773 | 103 |
py
|
hyperopt
|
hyperopt-master/hyperopt/rand.py
|
"""
Random search - presented as hyperopt.fmin_random
"""
import logging
import numpy as np
from . import pyll
from .base import miscs_update_idxs_vals
logger = logging.getLogger(__name__)
def suggest(new_ids, domain, trials, seed):
rng = np.random.default_rng(seed)
rval = []
for ii, new_id in enumerate(new_ids):
# -- sample new specs, idxs, vals
idxs, vals = pyll.rec_eval(
domain.s_idxs_vals, memo={domain.s_new_ids: [new_id], domain.s_rng: rng}
)
new_result = domain.new_result()
new_misc = dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)
miscs_update_idxs_vals([new_misc], idxs, vals)
rval.extend(trials.new_trial_docs([new_id], [None], [new_result], [new_misc]))
return rval
def suggest_batch(new_ids, domain, trials, seed):
rng = np.random.default_rng(seed)
# -- sample new specs, idxs, vals
idxs, vals = pyll.rec_eval(
domain.s_idxs_vals, memo={domain.s_new_ids: new_ids, domain.s_rng: rng}
)
return idxs, vals
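# Minimal usage sketch (illustrative only; the objective and search space below are
# assumptions for demonstration, not part of this module):
#
#     from hyperopt import fmin, hp
#     from hyperopt.rand import suggest as rand_suggest
#
#     best = fmin(
#         fn=lambda x: x ** 2,
#         space=hp.uniform("x", -1, 1),
#         algo=rand_suggest,
#         max_evals=100,
#     )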
# flake8 likes no trailing blank line
| 1,088 | 26.923077 | 86 |
py
|
hyperopt
|
hyperopt-master/hyperopt/atpe.py
|
"""
Implements the ATPE algorithm. See
https://www.electricbrain.io/blog/learning-to-optimize
and
https://www.electricbrain.io/blog/optimizing-optimization to learn more
"""
__authors__ = "Bradley Arsenault"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
from hyperopt import hp
from contextlib import contextmanager
import re
import functools
import random
import numpy
import numpy.random
import pkg_resources
import tempfile
import scipy.stats
import os
import math
import hyperopt
import datetime
import json
import copy
# Windows doesn't support opening a NamedTemporaryFile a second time while it is open.
# Solution inspired by https://stackoverflow.com/a/46501017/147507
@contextmanager
def ClosedNamedTempFile(contents):
try:
with tempfile.NamedTemporaryFile(delete=False) as f:
file_name = f.name
f.write(contents)
yield file_name
finally:
os.unlink(file_name)
class Hyperparameter:
"""This class represents a hyperparameter."""
def __init__(self, config, parent=None, root="root"):
self.config = config
self.root = root
self.name = root[5:]
self.parent = parent
self.resultVariableName = re.sub("\\.\\d+\\.", ".", self.name)
self.hyperoptVariableName = self.root
if "name" in config:
self.hyperoptVariableName = config["name"]
def createHyperoptSpace(self, lockedValues=None):
name = self.root
if lockedValues is None:
lockedValues = {}
if "anyOf" in self.config or "oneOf" in self.config:
v = "anyOf" if "anyOf" in self.config else "oneOf"
data = self.config[v]
subSpaces = [
Hyperparameter(
param, self, name + "." + str(index)
).createHyperoptSpace(lockedValues)
for index, param in enumerate(data)
]
for index, space in enumerate(subSpaces):
space["$index"] = index
choices = hp.choice(self.hyperoptVariableName, subSpaces)
return choices
elif "enum" in self.config:
if self.name in lockedValues:
return lockedValues[self.name]
choices = hp.choice(self.hyperoptVariableName, self.config["enum"])
return choices
elif "constant" in self.config:
if self.name in lockedValues:
return lockedValues[self.name]
return self.config["constant"]
elif self.config["type"] == "object":
space = {}
for key in self.config["properties"].keys():
config = self.config["properties"][key]
space[key] = Hyperparameter(
config, self, name + "." + key
).createHyperoptSpace(lockedValues)
return space
elif self.config["type"] == "number":
if self.name in lockedValues:
return lockedValues[self.name]
mode = self.config.get("mode", "uniform")
scaling = self.config.get("scaling", "linear")
if mode == "uniform":
min = self.config.get("min", 0)
max = self.config.get("max", 1)
rounding = self.config.get("rounding", None)
if scaling == "linear":
if rounding is not None:
return hp.quniform(
self.hyperoptVariableName, min, max, rounding
)
else:
return hp.uniform(self.hyperoptVariableName, min, max)
elif scaling == "logarithmic":
if rounding is not None:
return hp.qloguniform(
self.hyperoptVariableName,
math.log(min),
math.log(max),
rounding,
)
else:
return hp.loguniform(
self.hyperoptVariableName, math.log(min), math.log(max)
)
if mode == "randint":
min = self.config.get("min")
max = self.config.get("max")
return hp.randint(self.hyperoptVariableName, min, max)
if mode == "normal":
mean = self.config.get("mean", 0)
stddev = self.config.get("stddev", 1)
rounding = self.config.get("rounding", None)
if scaling == "linear":
if rounding is not None:
return hp.qnormal(
self.hyperoptVariableName, mean, stddev, rounding
)
else:
return hp.normal(self.hyperoptVariableName, mean, stddev)
elif scaling == "logarithmic":
if rounding is not None:
return hp.qlognormal(
self.hyperoptVariableName,
math.log(mean),
math.log(stddev),
rounding,
)
else:
return hp.lognormal(
self.hyperoptVariableName, math.log(mean), math.log(stddev)
)
def getFlatParameterNames(self):
name = self.root
if "anyOf" in self.config or "oneOf" in self.config:
keys = set()
v = "anyOf" if "anyOf" in self.config else "oneOf"
data = self.config[v]
for index, param in enumerate(data):
subKeys = Hyperparameter(
param, self, name + "." + str(index)
).getFlatParameterNames()
for key in subKeys:
keys.add(key)
return keys
elif "enum" in self.config or "constant" in self.config:
return [name]
elif self.config["type"] == "object":
keys = set()
for key in self.config["properties"].keys():
config = self.config["properties"][key]
subKeys = Hyperparameter(
config, self, name + "." + key
).getFlatParameterNames()
for key in subKeys:
keys.add(key)
return keys
elif self.config["type"] == "number":
return [name]
def getFlatParameters(self):
name = self.root
if "anyOf" in self.config or "oneOf" in self.config:
parameters = []
v = "anyOf" if "anyOf" in self.config else "oneOf"
data = self.config[v]
for index, param in enumerate(data):
subParameters = Hyperparameter(
param, self, name + "." + str(index)
).getFlatParameters()
parameters = parameters + subParameters
return parameters
elif "enum" in self.config or "constant" in self.config:
return [self]
elif self.config["type"] == "object":
parameters = []
for key in self.config["properties"].keys():
config = self.config["properties"][key]
subParameters = Hyperparameter(
config, self, name + "." + key
).getFlatParameters()
parameters = parameters + subParameters
return parameters
elif self.config["type"] == "number":
return [self]
def getLog10Cardinality(self):
if "anyOf" in self.config or "oneOf" in self.config:
v = "anyOf" if "anyOf" in self.config else "oneOf"
data = self.config[v]
log10_cardinality = Hyperparameter(
data[0], self, self.root + ".0"
).getLog10Cardinality()
for index, subParam in enumerate(data[1:]):
# We used logarithm identities to create this reduction formula
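                # Specifically: log10(10**a + 10**b) = b + log10(1 + 10**(a - b)),
                # applied below with a = log10_cardinality and b = other_log10_cardinality.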
other_log10_cardinality = Hyperparameter(
subParam, self, self.root + "." + str(index)
).getLog10Cardinality()
                # Revert to linear at high and low values, for numerical stability. See https://www.desmos.com/calculator/efkbbftd18 to observe the behavior.
if (log10_cardinality - other_log10_cardinality) > 3:
log10_cardinality = log10_cardinality + 1
elif (other_log10_cardinality - log10_cardinality) > 3:
log10_cardinality = other_log10_cardinality + 1
else:
log10_cardinality = other_log10_cardinality + math.log10(
1 + math.pow(10, log10_cardinality - other_log10_cardinality)
)
return log10_cardinality + math.log10(len(data))
elif "enum" in self.config:
return math.log10(len(self.config["enum"]))
elif "constant" in self.config:
return math.log10(1)
elif self.config["type"] == "object":
log10_cardinality = 0
for index, subParam in enumerate(self.config["properties"].values()):
subParameter = Hyperparameter(
subParam, self, self.root + "." + str(index)
)
log10_cardinality += subParameter.getLog10Cardinality()
return log10_cardinality
elif self.config["type"] == "number":
if "rounding" in self.config:
return math.log10(
min(
20,
(self.config["max"] - self.config["min"])
/ self.config["rounding"]
+ 1,
)
)
else:
# Default of 20 for fully uniform numbers.
return math.log10(20)
def convertToFlatValues(self, params):
flatParams = {}
def recurse(key, value, root):
result_key = root + "." + key
if isinstance(value, str):
flatParams[result_key[1:]] = value
elif (
isinstance(value, float)
or isinstance(value, bool)
or isinstance(value, int)
or numpy.issubdtype(value, numpy.integer)
or numpy.issubdtype(value, numpy.floating)
):
flatParams[result_key[1:]] = value
elif isinstance(value, dict):
for subkey, subvalue in value.items():
recurse(subkey, subvalue, result_key)
for key in params.keys():
value = params[key]
recurse(key, value, "")
flatValues = {}
if "anyOf" in self.config or "oneOf" in self.config:
v = "anyOf" if "anyOf" in self.config else "oneOf"
data = self.config[v]
subParameterIndex = flatParams[self.resultVariableName + ".$index"]
flatValues[self.name] = subParameterIndex
for index, param in enumerate(data):
subParameter = Hyperparameter(param, self, self.root + "." + str(index))
if index == subParameterIndex:
subFlatValues = subParameter.convertToFlatValues(flatParams)
for key in subFlatValues:
flatValues[key] = subFlatValues[key]
else:
for flatParam in subParameter.getFlatParameters():
flatValues[flatParam.name] = ""
return flatValues
elif "constant" in self.config:
flatValues[self.name] = flatParams[self.resultVariableName]
return flatValues
elif "enum" in self.config:
flatValues[self.name] = flatParams[self.resultVariableName]
return flatValues
elif self.config["type"] == "object":
for key in self.config["properties"].keys():
config = self.config["properties"][key]
subFlatValues = Hyperparameter(
config, self, self.root + "." + key
).convertToFlatValues(flatParams)
for key in subFlatValues:
flatValues[key] = subFlatValues[key]
if self.name == "":
for key in params.keys():
if key.startswith("$"):
flatValues[key] = params[key]
return flatValues
elif self.config["type"] == "number":
flatValues[self.name] = flatParams[self.resultVariableName]
return flatValues
def convertToStructuredValues(self, flatValues):
if "anyOf" in self.config or "oneOf" in self.config:
v = "anyOf" if "anyOf" in self.config else "oneOf"
data = self.config[v]
subParameterIndex = flatValues[self.name]
subParam = Hyperparameter(
data[subParameterIndex], self, self.root + "." + str(subParameterIndex)
)
structured = subParam.convertToStructuredValues(flatValues)
structured["$index"] = subParameterIndex
return structured
elif "constant" in self.config:
return flatValues[self.name]
elif "enum" in self.config:
return flatValues[self.name]
elif self.config["type"] == "object":
result = {}
for key in self.config["properties"].keys():
config = self.config["properties"][key]
subStructuredValue = Hyperparameter(
config, self, self.root + "." + key
).convertToStructuredValues(flatValues)
result[key] = subStructuredValue
if self.name == "":
for key in flatValues.keys():
if key.startswith("$"):
result[key] = flatValues[key]
return result
elif self.config["type"] == "number":
return flatValues[self.name]
@staticmethod
def createHyperparameterConfigForHyperoptDomain(domain):
if domain.name is None:
data = {"type": "object", "properties": {}}
for key in domain.params:
data["properties"][
key
] = Hyperparameter.createHyperparameterConfigForHyperoptDomain(
domain.params[key]
)
if "name" not in data["properties"][key]:
data["properties"][key]["name"] = key
return data
elif domain.name == "dict":
data = {"type": "object", "properties": {}}
for item in domain.named_args:
data["properties"][
item[0]
] = Hyperparameter.createHyperparameterConfigForHyperoptDomain(item[1])
return data
elif domain.name == "switch":
data = {"oneOf": []}
data["name"] = domain.pos_args[0].pos_args
for item in domain.pos_args[1:]:
data["oneOf"].append(
Hyperparameter.createHyperparameterConfigForHyperoptDomain(item)
)
return data
elif domain.name == "hyperopt_param":
data = Hyperparameter.createHyperparameterConfigForHyperoptDomain(
domain.pos_args[1]
)
data["name"] = domain.pos_args[0]._obj
return data
elif domain.name == "uniform":
data = {"type": "number"}
data["scaling"] = "linear"
data["mode"] = "uniform"
data["min"] = domain.pos_args[0]._obj
data["max"] = domain.pos_args[1]._obj
return data
elif domain.name == "quniform":
data = {"type": "number"}
data["scaling"] = "linear"
data["mode"] = "uniform"
data["min"] = domain.pos_args[0]._obj
data["max"] = domain.pos_args[1]._obj
data["rounding"] = domain.pos_args[2]._obj
return data
elif domain.name == "loguniform":
data = {"type": "number"}
data["scaling"] = "logarithmic"
data["mode"] = "uniform"
data["min"] = math.exp(domain.pos_args[0]._obj)
data["max"] = math.exp(domain.pos_args[1]._obj)
return data
elif domain.name == "qloguniform":
data = {"type": "number"}
data["scaling"] = "logarithmic"
data["mode"] = "uniform"
data["min"] = math.exp(domain.pos_args[0]._obj)
data["max"] = math.exp(domain.pos_args[1]._obj)
data["rounding"] = domain.pos_args[2]._obj
return data
elif domain.name == "normal":
data = {"type": "number"}
data["scaling"] = "linear"
data["mode"] = "normal"
data["mean"] = domain.pos_args[0]._obj
data["stddev"] = domain.pos_args[1]._obj
return data
elif domain.name == "qnormal":
data = {"type": "number"}
data["scaling"] = "linear"
data["mode"] = "normal"
data["mean"] = domain.pos_args[0]._obj
data["stddev"] = domain.pos_args[1]._obj
data["rounding"] = domain.pos_args[2]._obj
return data
elif domain.name == "lognormal":
data = {"type": "number"}
data["scaling"] = "logarithmic"
data["mode"] = "normal"
data["mean"] = math.exp(domain.pos_args[0]._obj)
data["stddev"] = math.exp(domain.pos_args[1]._obj)
return data
elif domain.name == "qlognormal":
data = {"type": "number"}
data["scaling"] = "logarithmic"
data["mode"] = "normal"
data["mean"] = math.exp(domain.pos_args[0]._obj)
data["stddev"] = math.exp(domain.pos_args[1]._obj)
data["rounding"] = domain.pos_args[2]._obj
return data
elif domain.name == "literal":
data = {"type": "string", "constant": domain._obj}
return data
elif domain.name == "randint":
data = {"type": "number"}
low = domain.pos_args[0]._obj
high = domain.pos_args[1]._obj if len(domain.pos_args) > 1 else None
data["min"] = 0 if high is None else low
data["max"] = high or low
data["mode"] = "randint"
return data
else:
raise ValueError("Unsupported hyperopt domain type " + str(domain))
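    # Illustrative mapping performed by the method above (the example label and bounds
    # are assumptions): a pyll node of the form hyperopt_param("lr", uniform(0.0, 1.0))
    # is converted into
    #     {"type": "number", "scaling": "linear", "mode": "uniform",
    #      "min": 0.0, "max": 1.0, "name": "lr"}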
class ATPEOptimizer:
resultInformationKeys = ["trial", "status", "loss", "time", "log", "error"]
atpeParameters = [
"gamma",
"nEICandidates",
"resultFilteringAgeMultiplier",
"resultFilteringLossRankMultiplier",
"resultFilteringMode",
"resultFilteringRandomProbability",
"secondaryCorrelationExponent",
"secondaryCorrelationMultiplier",
"secondaryCutoff",
"secondaryFixedProbability",
"secondaryLockingMode",
"secondaryProbabilityMode",
"secondaryTopLockingPercentile",
]
atpeParameterCascadeOrdering = [
"resultFilteringMode",
"secondaryProbabilityMode",
"secondaryLockingMode",
"resultFilteringAgeMultiplier",
"resultFilteringLossRankMultiplier",
"resultFilteringRandomProbability",
"secondaryTopLockingPercentile",
"secondaryCorrelationExponent",
"secondaryCorrelationMultiplier",
"secondaryFixedProbability",
"secondaryCutoff",
"gamma",
"nEICandidates",
]
atpeParameterValues = {
"resultFilteringMode": ["age", "loss_rank", "none", "random"],
"secondaryLockingMode": ["random", "top"],
"secondaryProbabilityMode": ["correlation", "fixed"],
}
atpeModelFeatureKeys = [
"all_correlation_best_percentile25_ratio",
"all_correlation_best_percentile50_ratio",
"all_correlation_best_percentile75_ratio",
"all_correlation_kurtosis",
"all_correlation_percentile5_percentile25_ratio",
"all_correlation_skew",
"all_correlation_stddev_best_ratio",
"all_correlation_stddev_median_ratio",
"all_loss_best_percentile25_ratio",
"all_loss_best_percentile50_ratio",
"all_loss_best_percentile75_ratio",
"all_loss_kurtosis",
"all_loss_percentile5_percentile25_ratio",
"all_loss_skew",
"all_loss_stddev_best_ratio",
"all_loss_stddev_median_ratio",
"log10_cardinality",
"recent_10_correlation_best_percentile25_ratio",
"recent_10_correlation_best_percentile50_ratio",
"recent_10_correlation_best_percentile75_ratio",
"recent_10_correlation_kurtosis",
"recent_10_correlation_percentile5_percentile25_ratio",
"recent_10_correlation_skew",
"recent_10_correlation_stddev_best_ratio",
"recent_10_correlation_stddev_median_ratio",
"recent_10_loss_best_percentile25_ratio",
"recent_10_loss_best_percentile50_ratio",
"recent_10_loss_best_percentile75_ratio",
"recent_10_loss_kurtosis",
"recent_10_loss_percentile5_percentile25_ratio",
"recent_10_loss_skew",
"recent_10_loss_stddev_best_ratio",
"recent_10_loss_stddev_median_ratio",
"recent_15%_correlation_best_percentile25_ratio",
"recent_15%_correlation_best_percentile50_ratio",
"recent_15%_correlation_best_percentile75_ratio",
"recent_15%_correlation_kurtosis",
"recent_15%_correlation_percentile5_percentile25_ratio",
"recent_15%_correlation_skew",
"recent_15%_correlation_stddev_best_ratio",
"recent_15%_correlation_stddev_median_ratio",
"recent_15%_loss_best_percentile25_ratio",
"recent_15%_loss_best_percentile50_ratio",
"recent_15%_loss_best_percentile75_ratio",
"recent_15%_loss_kurtosis",
"recent_15%_loss_percentile5_percentile25_ratio",
"recent_15%_loss_skew",
"recent_15%_loss_stddev_best_ratio",
"recent_15%_loss_stddev_median_ratio",
"recent_25_correlation_best_percentile25_ratio",
"recent_25_correlation_best_percentile50_ratio",
"recent_25_correlation_best_percentile75_ratio",
"recent_25_correlation_kurtosis",
"recent_25_correlation_percentile5_percentile25_ratio",
"recent_25_correlation_skew",
"recent_25_correlation_stddev_best_ratio",
"recent_25_correlation_stddev_median_ratio",
"recent_25_loss_best_percentile25_ratio",
"recent_25_loss_best_percentile50_ratio",
"recent_25_loss_best_percentile75_ratio",
"recent_25_loss_kurtosis",
"recent_25_loss_percentile5_percentile25_ratio",
"recent_25_loss_skew",
"recent_25_loss_stddev_best_ratio",
"recent_25_loss_stddev_median_ratio",
"top_10%_correlation_best_percentile25_ratio",
"top_10%_correlation_best_percentile50_ratio",
"top_10%_correlation_best_percentile75_ratio",
"top_10%_correlation_kurtosis",
"top_10%_correlation_percentile5_percentile25_ratio",
"top_10%_correlation_skew",
"top_10%_correlation_stddev_best_ratio",
"top_10%_correlation_stddev_median_ratio",
"top_10%_loss_best_percentile25_ratio",
"top_10%_loss_best_percentile50_ratio",
"top_10%_loss_best_percentile75_ratio",
"top_10%_loss_kurtosis",
"top_10%_loss_percentile5_percentile25_ratio",
"top_10%_loss_skew",
"top_10%_loss_stddev_best_ratio",
"top_10%_loss_stddev_median_ratio",
"top_20%_correlation_best_percentile25_ratio",
"top_20%_correlation_best_percentile50_ratio",
"top_20%_correlation_best_percentile75_ratio",
"top_20%_correlation_kurtosis",
"top_20%_correlation_percentile5_percentile25_ratio",
"top_20%_correlation_skew",
"top_20%_correlation_stddev_best_ratio",
"top_20%_correlation_stddev_median_ratio",
"top_20%_loss_best_percentile25_ratio",
"top_20%_loss_best_percentile50_ratio",
"top_20%_loss_best_percentile75_ratio",
"top_20%_loss_kurtosis",
"top_20%_loss_percentile5_percentile25_ratio",
"top_20%_loss_skew",
"top_20%_loss_stddev_best_ratio",
"top_20%_loss_stddev_median_ratio",
"top_30%_correlation_best_percentile25_ratio",
"top_30%_correlation_best_percentile50_ratio",
"top_30%_correlation_best_percentile75_ratio",
"top_30%_correlation_kurtosis",
"top_30%_correlation_percentile5_percentile25_ratio",
"top_30%_correlation_skew",
"top_30%_correlation_stddev_best_ratio",
"top_30%_correlation_stddev_median_ratio",
"top_30%_loss_best_percentile25_ratio",
"top_30%_loss_best_percentile50_ratio",
"top_30%_loss_best_percentile75_ratio",
"top_30%_loss_kurtosis",
"top_30%_loss_percentile5_percentile25_ratio",
"top_30%_loss_skew",
"top_30%_loss_stddev_best_ratio",
"top_30%_loss_stddev_median_ratio",
]
def __init__(self):
try:
import lightgbm
import sklearn
except ImportError:
raise ImportError(
"You must install lightgbm and sklearn in order to use the ATPE algorithm. Please run `pip install lightgbm scikit-learn` and try again. These are not built in dependencies of hyperopt."
)
scalingModelData = json.loads(
pkg_resources.resource_string(
__name__, "atpe_models/scaling_model.json"
).decode("utf-8")
)
self.featureScalingModels = {}
for key in self.atpeModelFeatureKeys:
self.featureScalingModels[key] = sklearn.preprocessing.StandardScaler()
self.featureScalingModels[key].scale_ = numpy.array(
scalingModelData[key]["scales"]
)
self.featureScalingModels[key].mean_ = numpy.array(
scalingModelData[key]["means"]
)
self.featureScalingModels[key].var_ = numpy.array(
scalingModelData[key]["variances"]
)
self.featureScalingModels[key].n_features_in_ = 1
self.parameterModels = {}
self.parameterModelConfigurations = {}
for param in self.atpeParameters:
modelData = pkg_resources.resource_string(
__name__, "atpe_models/model-" + param + ".txt"
)
with ClosedNamedTempFile(modelData) as model_file_name:
self.parameterModels[param] = lightgbm.Booster(
model_file=model_file_name
)
configString = pkg_resources.resource_string(
__name__, "atpe_models/model-" + param + "-configuration.json"
)
data = json.loads(configString.decode("utf-8"))
self.parameterModelConfigurations[param] = data
self.lastATPEParameters = None
self.lastLockedParameters = []
self.atpeParamDetails = None
def recommendNextParameters(
self, hyperparameterSpace, results, currentTrials, lockedValues=None
):
rstate = numpy.random.default_rng(seed=int(random.randint(1, 2**32 - 1)))
params = {"param": {}}
def sample(parameters):
params["param"] = parameters
return {"loss": 0.5, "status": "ok"}
parameters = Hyperparameter(hyperparameterSpace).getFlatParameters()
if lockedValues is not None:
# Remove any locked values from ones the optimizer will examine
parameters = list(
filter(lambda param: param.name not in lockedValues.keys(), parameters)
)
log10_cardinality = Hyperparameter(hyperparameterSpace).getLog10Cardinality()
initializationRounds = max(10, int(log10_cardinality))
atpeParams = {}
atpeParamDetails = {}
if (
len(list(result for result in results if result["loss"]))
< initializationRounds
):
atpeParams = {
"gamma": 1.0,
"nEICandidates": 24,
"resultFilteringAgeMultiplier": None,
"resultFilteringLossRankMultiplier": None,
"resultFilteringMode": "none",
"resultFilteringRandomProbability": None,
"secondaryCorrelationExponent": 1.0,
"secondaryCorrelationMultiplier": None,
"secondaryCutoff": 0,
"secondarySorting": 0,
"secondaryFixedProbability": 0.5,
"secondaryLockingMode": "top",
"secondaryProbabilityMode": "fixed",
"secondaryTopLockingPercentile": 0,
}
else:
# Calculate the statistics for the distribution
stats = self.computeAllResultStatistics(hyperparameterSpace, results)
stats["num_parameters"] = len(parameters)
stats["log10_cardinality"] = Hyperparameter(
hyperparameterSpace
).getLog10Cardinality()
stats["log10_trial"] = math.log10(len(results))
baseVector = []
for feature in self.atpeModelFeatureKeys:
scalingModel = self.featureScalingModels[feature]
transformed = scalingModel.transform([[stats[feature]]])[0][0]
baseVector.append(transformed)
baseVector = numpy.array([baseVector])
for atpeParamIndex, atpeParameter in enumerate(
self.atpeParameterCascadeOrdering
):
vector = copy.copy(baseVector)[0].tolist()
atpeParamFeatures = self.atpeParameterCascadeOrdering[:atpeParamIndex]
for atpeParamFeature in atpeParamFeatures:
# We have to insert a special value of -3 for any conditional parameters.
if (
atpeParamFeature == "resultFilteringAgeMultiplier"
and atpeParams["resultFilteringMode"] != "age"
):
vector.append(
-3
) # This is the default value inserted when parameters aren't relevant
elif (
atpeParamFeature == "resultFilteringLossRankMultiplier"
and atpeParams["resultFilteringMode"] != "loss_rank"
):
vector.append(
-3
) # This is the default value inserted when parameters aren't relevant
elif (
atpeParamFeature == "resultFilteringRandomProbability"
and atpeParams["resultFilteringMode"] != "random"
):
vector.append(
-3
) # This is the default value inserted when parameters aren't relevant
elif (
atpeParamFeature == "secondaryCorrelationMultiplier"
and atpeParams["secondaryProbabilityMode"] != "correlation"
):
vector.append(
-3
) # This is the default value inserted when parameters aren't relevant
elif (
atpeParamFeature == "secondaryFixedProbability"
and atpeParams["secondaryProbabilityMode"] != "fixed"
):
vector.append(
-3
) # This is the default value inserted when parameters aren't relevant
elif (
atpeParamFeature == "secondaryTopLockingPercentile"
and atpeParams["secondaryLockingMode"] != "top"
):
vector.append(
-3
) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature in self.atpeParameterValues:
for value in self.atpeParameterValues[atpeParamFeature]:
vector.append(
1.0 if atpeParams[atpeParamFeature] == value else 0
)
else:
vector.append(float(atpeParams[atpeParamFeature]))
allFeatureKeysForATPEParamModel = copy.copy(self.atpeModelFeatureKeys)
for atpeParamFeature in atpeParamFeatures:
if atpeParamFeature in self.atpeParameterValues:
for value in self.atpeParameterValues[atpeParamFeature]:
allFeatureKeysForATPEParamModel.append(
atpeParamFeature + "_" + value
)
else:
allFeatureKeysForATPEParamModel.append(atpeParamFeature)
value = self.parameterModels[atpeParameter].predict([vector])[0]
featureContributions = self.parameterModels[atpeParameter].predict(
[vector], pred_contrib=True
)[0]
atpeParamDetails[atpeParameter] = {"value": None, "reason": None}
# Set the value
if atpeParameter in self.atpeParameterValues:
# Renormalize the predicted probabilities
config = self.parameterModelConfigurations[atpeParameter]
for atpeParamValueIndex, atpeParamValue in enumerate(
self.atpeParameterValues[atpeParameter]
):
value[atpeParamValueIndex] = (
(
(
value[atpeParamValueIndex]
- config["predMeans"][atpeParamValue]
)
/ config["predStddevs"][atpeParamValue]
)
* config["origStddevs"][atpeParamValue]
) + config["origMeans"][atpeParamValue]
value[atpeParamValueIndex] = max(
0.0, min(1.0, value[atpeParamValueIndex])
)
maxVal = numpy.max(value)
for atpeParamValueIndex, atpeParamValue in enumerate(
self.atpeParameterValues[atpeParameter]
):
value[atpeParamValueIndex] = max(
value[atpeParamValueIndex], maxVal * 0.15
) # We still allow the non recommended modes to get chosen 15% of the time
# Make a random weighted choice based on the normalized probabilities
probabilities = value / numpy.sum(value)
chosen = numpy.random.choice(
a=self.atpeParameterValues[atpeParameter], p=probabilities
)
atpeParams[atpeParameter] = str(chosen)
else:
# Renormalize the predictions
config = self.parameterModelConfigurations[atpeParameter]
value = (
((value - config["predMean"]) / config["predStddev"])
* config["origStddev"]
) + config["origMean"]
atpeParams[atpeParameter] = float(value)
atpeParamDetails[atpeParameter]["reason"] = {}
# If we are predicting a class, we get separate feature contributions for each class. Take the average
if atpeParameter in self.atpeParameterValues:
featureContributions = numpy.mean(
numpy.reshape(
featureContributions,
newshape=(
len(allFeatureKeysForATPEParamModel) + 1,
len(self.atpeParameterValues[atpeParameter]),
),
),
axis=1,
)
contributions = [
(
self.atpeModelFeatureKeys[index],
float(featureContributions[index]),
)
for index in range(len(self.atpeModelFeatureKeys))
]
contributions = sorted(contributions, key=lambda r: -r[1])
                # Only focus on the top 10% of features, since that is more informative.
                # Otherwise the total gets spread thinly over many features, because our
                # model is highly regularized.
contributions = contributions[: int(len(contributions) / 10)]
total = numpy.sum([contrib[1] for contrib in contributions])
for contributionIndex, contribution in enumerate(contributions[:3]):
atpeParamDetails[atpeParameter]["reason"][contribution[0]] = (
str(int(float(contribution[1]) * 100.0 / total)) + "%"
)
# Apply bounds to all the parameters
if atpeParameter == "gamma":
atpeParams["gamma"] = max(0.2, min(2.0, atpeParams["gamma"]))
if atpeParameter == "nEICandidates":
atpeParams["nEICandidates"] = int(
max(2.0, min(48, atpeParams["nEICandidates"]))
)
if atpeParameter == "resultFilteringAgeMultiplier":
atpeParams["resultFilteringAgeMultiplier"] = max(
1.0, min(4.0, atpeParams["resultFilteringAgeMultiplier"])
)
if atpeParameter == "resultFilteringLossRankMultiplier":
atpeParams["resultFilteringLossRankMultiplier"] = max(
1.0, min(4.0, atpeParams["resultFilteringLossRankMultiplier"])
)
if atpeParameter == "resultFilteringRandomProbability":
atpeParams["resultFilteringRandomProbability"] = max(
0.7, min(0.9, atpeParams["resultFilteringRandomProbability"])
)
if atpeParameter == "secondaryCorrelationExponent":
atpeParams["secondaryCorrelationExponent"] = max(
1.0, min(3.0, atpeParams["secondaryCorrelationExponent"])
)
if atpeParameter == "secondaryCorrelationMultiplier":
atpeParams["secondaryCorrelationMultiplier"] = max(
0.2, min(1.8, atpeParams["secondaryCorrelationMultiplier"])
)
if atpeParameter == "secondaryCutoff":
atpeParams["secondaryCutoff"] = max(
-1.0, min(1.0, atpeParams["secondaryCutoff"])
)
if atpeParameter == "secondaryFixedProbability":
atpeParams["secondaryFixedProbability"] = max(
0.2, min(0.8, atpeParams["secondaryFixedProbability"])
)
if atpeParameter == "secondaryTopLockingPercentile":
atpeParams["secondaryTopLockingPercentile"] = max(
0, min(10.0, atpeParams["secondaryTopLockingPercentile"])
)
# Now blank out unneeded params so they don't confuse us
if atpeParams["secondaryLockingMode"] == "random":
atpeParams["secondaryTopLockingPercentile"] = None
if atpeParams["secondaryProbabilityMode"] == "fixed":
atpeParams["secondaryCorrelationMultiplier"] = None
else:
atpeParams["secondaryFixedProbability"] = None
if atpeParams["resultFilteringMode"] == "none":
atpeParams["resultFilteringAgeMultiplier"] = None
atpeParams["resultFilteringLossRankMultiplier"] = None
atpeParams["resultFilteringRandomProbability"] = None
elif atpeParams["resultFilteringMode"] == "age":
atpeParams["resultFilteringLossRankMultiplier"] = None
atpeParams["resultFilteringRandomProbability"] = None
elif atpeParams["resultFilteringMode"] == "loss_rank":
atpeParams["resultFilteringAgeMultiplier"] = None
atpeParams["resultFilteringRandomProbability"] = None
elif atpeParams["resultFilteringMode"] == "random":
atpeParams["resultFilteringAgeMultiplier"] = None
atpeParams["resultFilteringLossRankMultiplier"] = None
for atpeParameter in self.atpeParameters:
if atpeParams[atpeParameter] is None:
del atpeParamDetails[atpeParameter]
else:
atpeParamDetails[atpeParameter]["value"] = atpeParams[atpeParameter]
self.lastATPEParameters = atpeParams
self.atpeParamDetails = atpeParamDetails
def computePrimarySecondary():
if len(results) < initializationRounds:
return (
parameters,
[],
[0.5] * len(parameters),
) # Put all parameters as primary
if len({result["loss"] for result in results}) < 5:
return (
parameters,
[],
[0.5] * len(parameters),
) # Put all parameters as primary
numberParameters = [
parameter
for parameter in parameters
if parameter.config["type"] == "number"
]
otherParameters = [
parameter
for parameter in parameters
if parameter.config["type"] != "number"
]
totalWeight = 0
correlations = {}
for parameter in numberParameters:
if (
len(
{
result[parameter.name]
for result in results
if result[parameter.name] is not None
}
)
< 2
):
correlations[parameter.name] = 0
else:
values = []
valueLosses = []
for result in results:
if (
result[parameter.name] is not None
and result["loss"] is not None
):
values.append(result[parameter.name])
valueLosses.append(result["loss"])
correlation = math.pow(
abs(scipy.stats.spearmanr(values, valueLosses)[0]),
atpeParams["secondaryCorrelationExponent"],
)
correlations[parameter.name] = correlation
totalWeight += correlation
threshold = totalWeight * abs(atpeParams["secondaryCutoff"])
if atpeParams["secondaryCutoff"] < 0:
# Reverse order - we lock in the highest correlated parameters
sortedParameters = sorted(
numberParameters, key=lambda parameter: correlations[parameter.name]
)
else:
# Normal order - sort properties by their correlation to lock in lowest correlated parameters
sortedParameters = sorted(
numberParameters,
key=lambda parameter: -correlations[parameter.name],
)
primaryParameters = []
secondaryParameters = []
cumulative = totalWeight
for parameter in sortedParameters:
if cumulative < threshold:
secondaryParameters.append(parameter)
else:
primaryParameters.append(parameter)
cumulative -= correlations[parameter.name]
return (
primaryParameters + otherParameters,
secondaryParameters,
correlations,
)
if (
len([result["loss"] for result in results if result["loss"] is not None])
== 0
):
maxLoss = 1
else:
maxLoss = numpy.max(
[result["loss"] for result in results if result["loss"] is not None]
)
# We create a copy of lockedValues so we don't modify the object that was passed in as an argument - treat it as immutable.
# The ATPE algorithm will lock additional values in a stochastic manner
if lockedValues is None:
lockedValues = {}
else:
lockedValues = copy.copy(lockedValues)
filteredResults = []
removedResults = []
if len(results) > initializationRounds:
(
primaryParameters,
secondaryParameters,
correlations,
) = computePrimarySecondary()
self.lastLockedParameters = []
sortedResults = list(
sorted(
list(results),
key=lambda result: (
result["loss"] if result["loss"] is not None else (maxLoss + 1)
),
)
)
topResults = sortedResults
if atpeParams["secondaryLockingMode"] == "top":
topResultsN = max(
1,
int(
math.ceil(
len(sortedResults)
* atpeParams["secondaryTopLockingPercentile"]
/ 100.0
)
),
)
topResults = sortedResults[:topResultsN]
            # Any secondary parameter may be locked to either the current best
            # value or any value within the result pool.
for secondary in secondaryParameters:
if atpeParams["secondaryProbabilityMode"] == "fixed":
if random.uniform(0, 1) < atpeParams["secondaryFixedProbability"]:
self.lastLockedParameters.append(secondary.name)
if atpeParams["secondaryLockingMode"] == "top":
lockResult = random.choice(topResults)
if (
lockResult[secondary.name] is not None
and lockResult[secondary.name] != ""
):
lockedValues[secondary.name] = lockResult[
secondary.name
]
elif atpeParams["secondaryLockingMode"] == "random":
lockedValues[
secondary.name
] = self.chooseRandomValueForParameter(secondary)
elif atpeParams["secondaryProbabilityMode"] == "correlation":
probability = max(
0,
min(
1,
abs(correlations[secondary.name])
* atpeParams["secondaryCorrelationMultiplier"],
),
)
if random.uniform(0, 1) < probability:
self.lastLockedParameters.append(secondary.name)
if atpeParams["secondaryLockingMode"] == "top":
lockResult = random.choice(topResults)
if (
lockResult[secondary.name] is not None
and lockResult[secondary.name] != ""
):
lockedValues[secondary.name] = lockResult[
secondary.name
]
elif atpeParams["secondaryLockingMode"] == "random":
lockedValues[
secondary.name
] = self.chooseRandomValueForParameter(secondary)
        # As the last step, we filter the results prior to sending them into ATPE
for resultIndex, result in enumerate(results):
if atpeParams["resultFilteringMode"] == "none":
filteredResults.append(result)
elif atpeParams["resultFilteringMode"] == "random":
if (
random.uniform(0, 1)
< atpeParams["resultFilteringRandomProbability"]
):
filteredResults.append(result)
else:
removedResults.append(result)
elif atpeParams["resultFilteringMode"] == "age":
age = float(resultIndex) / float(len(results))
if random.uniform(0, 1) < (
atpeParams["resultFilteringAgeMultiplier"] * age
):
filteredResults.append(result)
else:
removedResults.append(result)
elif atpeParams["resultFilteringMode"] == "loss_rank":
rank = 1.0 - (
float(sortedResults.index(result)) / float(len(results))
)
if random.uniform(0, 1) < (
atpeParams["resultFilteringLossRankMultiplier"] * rank
):
filteredResults.append(result)
else:
removedResults.append(result)
        # If we are in initialization, or if by some fluke of randomness we end up
        # with no results after filtering, then just use all the results
if len(filteredResults) == 0:
filteredResults = results
hyperopt.fmin(
fn=sample,
space=Hyperparameter(hyperparameterSpace).createHyperoptSpace(lockedValues),
algo=functools.partial(
hyperopt.tpe.suggest,
n_startup_jobs=initializationRounds,
gamma=atpeParams["gamma"],
n_EI_candidates=int(atpeParams["nEICandidates"]),
),
max_evals=1,
trials=self.convertResultsToTrials(hyperparameterSpace, filteredResults),
rstate=rstate,
show_progressbar=False,
)
return params.get("param")
def chooseRandomValueForParameter(self, parameter):
if parameter.config.get("mode", "uniform") == "uniform":
minVal = parameter.config["min"]
maxVal = parameter.config["max"]
if parameter.config.get("scaling", "linear") == "logarithmic":
minVal = math.log(minVal)
maxVal = math.log(maxVal)
value = random.uniform(minVal, maxVal)
if parameter.config.get("scaling", "linear") == "logarithmic":
value = math.exp(value)
if "rounding" in parameter.config:
value = (
round(value / parameter.config["rounding"])
* parameter.config["rounding"]
)
elif parameter.config.get("mode", "uniform") == "normal":
meanVal = parameter.config["mean"]
stddevVal = parameter.config["stddev"]
if parameter.config.get("scaling", "linear") == "logarithmic":
meanVal = math.log(meanVal)
stddevVal = math.log(stddevVal)
value = random.gauss(meanVal, stddevVal)
if parameter.config.get("scaling", "linear") == "logarithmic":
value = math.exp(value)
if "rounding" in parameter.config:
value = (
round(value / parameter.config["rounding"])
* parameter.config["rounding"]
)
elif parameter.config.get("mode", "uniform") == "randint":
min = parameter.config["min"]
max = parameter.config["max"]
# `max` should be reduced by one, as native randint includes `max`, while numpy randint excludes it
value = random.randint(min, max - 1)
return value
def computePartialResultStatistics(self, hyperparameterSpace, results):
losses = numpy.array(
sorted([result["loss"] for result in results if result["loss"] is not None])
)
bestLoss = 0
percentile5Loss = 0
percentile25Loss = 0
percentile50Loss = 0
percentile75Loss = 0
statistics = {}
if len(set(losses)) > 1:
bestLoss = numpy.percentile(losses, 0)
percentile5Loss = numpy.percentile(losses, 5)
percentile25Loss = numpy.percentile(losses, 25)
percentile50Loss = numpy.percentile(losses, 50)
percentile75Loss = numpy.percentile(losses, 75)
statistics["loss_skew"] = scipy.stats.skew(losses)
statistics["loss_kurtosis"] = scipy.stats.kurtosis(losses)
else:
statistics["loss_skew"] = 0
statistics["loss_kurtosis"] = 0
if percentile50Loss == 0:
statistics["loss_stddev_median_ratio"] = 0
statistics["loss_best_percentile50_ratio"] = 0
else:
statistics["loss_stddev_median_ratio"] = (
numpy.std(losses) / percentile50Loss
)
statistics["loss_best_percentile50_ratio"] = bestLoss / percentile50Loss
if bestLoss == 0:
statistics["loss_stddev_best_ratio"] = 0
else:
statistics["loss_stddev_best_ratio"] = numpy.std(losses) / bestLoss
if percentile25Loss == 0:
statistics["loss_best_percentile25_ratio"] = 0
statistics["loss_percentile5_percentile25_ratio"] = 0
else:
statistics["loss_best_percentile25_ratio"] = bestLoss / percentile25Loss
statistics["loss_percentile5_percentile25_ratio"] = (
percentile5Loss / percentile25Loss
)
if percentile75Loss == 0:
statistics["loss_best_percentile75_ratio"] = 0
else:
statistics["loss_best_percentile75_ratio"] = bestLoss / percentile75Loss
def getValue(result, parameter):
return result[parameter.name]
# Now we compute correlations between each parameter and the loss
parameters = Hyperparameter(hyperparameterSpace).getFlatParameters()
correlations = []
for parameter in parameters:
if parameter.config["type"] == "number":
if (
len(
{
getValue(result, parameter)
for result in results
if (
getValue(result, parameter) is not None
and result["loss"] is not None
)
}
)
< 2
):
correlations.append(0)
else:
values = []
valueLosses = []
for result in results:
if result["loss"] is not None and (
isinstance(getValue(result, parameter), float)
or isinstance(getValue(result, parameter), int)
):
values.append(getValue(result, parameter))
valueLosses.append(result["loss"])
correlation = abs(scipy.stats.spearmanr(values, valueLosses)[0])
if math.isnan(correlation) or math.isinf(correlation):
correlations.append(0)
else:
correlations.append(correlation)
correlations = numpy.array(correlations)
if len(set(correlations)) == 1:
statistics["correlation_skew"] = 0
statistics["correlation_kurtosis"] = 0
statistics["correlation_stddev_median_ratio"] = 0
statistics["correlation_stddev_best_ratio"] = 0
statistics["correlation_best_percentile25_ratio"] = 0
statistics["correlation_best_percentile50_ratio"] = 0
statistics["correlation_best_percentile75_ratio"] = 0
statistics["correlation_percentile5_percentile25_ratio"] = 0
else:
bestCorrelation = numpy.percentile(
correlations, 100
) # Correlations are in the opposite order of losses, higher correlation is considered "best"
percentile5Correlation = numpy.percentile(correlations, 95)
percentile25Correlation = numpy.percentile(correlations, 75)
percentile50Correlation = numpy.percentile(correlations, 50)
percentile75Correlation = numpy.percentile(correlations, 25)
statistics["correlation_skew"] = scipy.stats.skew(correlations)
statistics["correlation_kurtosis"] = scipy.stats.kurtosis(correlations)
if percentile50Correlation == 0:
statistics["correlation_stddev_median_ratio"] = 0
statistics["correlation_best_percentile50_ratio"] = 0
else:
statistics["correlation_stddev_median_ratio"] = (
numpy.std(correlations) / percentile50Correlation
)
statistics["correlation_best_percentile50_ratio"] = (
bestCorrelation / percentile50Correlation
)
if bestCorrelation == 0:
statistics["correlation_stddev_best_ratio"] = 0
else:
statistics["correlation_stddev_best_ratio"] = (
numpy.std(correlations) / bestCorrelation
)
if percentile25Correlation == 0:
statistics["correlation_best_percentile25_ratio"] = 0
statistics["correlation_percentile5_percentile25_ratio"] = 0
else:
statistics["correlation_best_percentile25_ratio"] = (
bestCorrelation / percentile25Correlation
)
statistics["correlation_percentile5_percentile25_ratio"] = (
percentile5Correlation / percentile25Correlation
)
if percentile75Correlation == 0:
statistics["correlation_best_percentile75_ratio"] = 0
else:
statistics["correlation_best_percentile75_ratio"] = (
bestCorrelation / percentile75Correlation
)
return statistics
def computeAllResultStatistics(self, hyperparameterSpace, results):
losses = numpy.array(
sorted([result["loss"] for result in results if result["loss"] is not None])
)
if len(set(losses)) > 1:
percentile10Loss = numpy.percentile(losses, 10)
percentile20Loss = numpy.percentile(losses, 20)
percentile30Loss = numpy.percentile(losses, 30)
else:
percentile10Loss = losses[0]
percentile20Loss = losses[0]
percentile30Loss = losses[0]
allResults = list(results)
percentile10Results = [
result
for result in results
if result["loss"] is not None and result["loss"] <= percentile10Loss
]
percentile20Results = [
result
for result in results
if result["loss"] is not None and result["loss"] <= percentile20Loss
]
percentile30Results = [
result
for result in results
if result["loss"] is not None and result["loss"] <= percentile30Loss
]
recent10Count = min(len(results), 10)
recent10Results = results[-recent10Count:]
recent25Count = min(len(results), 25)
recent25Results = results[-recent25Count:]
recent15PercentCount = max(math.ceil(len(results) * 0.15), 5)
recent15PercentResults = results[-recent15PercentCount:]
statistics = {}
allResultStatistics = self.computePartialResultStatistics(
hyperparameterSpace, allResults
)
for stat, value in allResultStatistics.items():
statistics["all_" + stat] = value
percentile10Statistics = self.computePartialResultStatistics(
hyperparameterSpace, percentile10Results
)
for stat, value in percentile10Statistics.items():
statistics["top_10%_" + stat] = value
percentile20Statistics = self.computePartialResultStatistics(
hyperparameterSpace, percentile20Results
)
for stat, value in percentile20Statistics.items():
statistics["top_20%_" + stat] = value
percentile30Statistics = self.computePartialResultStatistics(
hyperparameterSpace, percentile30Results
)
for stat, value in percentile30Statistics.items():
statistics["top_30%_" + stat] = value
recent10Statistics = self.computePartialResultStatistics(
hyperparameterSpace, recent10Results
)
for stat, value in recent10Statistics.items():
statistics["recent_10_" + stat] = value
recent25Statistics = self.computePartialResultStatistics(
hyperparameterSpace, recent25Results
)
for stat, value in recent25Statistics.items():
statistics["recent_25_" + stat] = value
recent15PercentResult = self.computePartialResultStatistics(
hyperparameterSpace, recent15PercentResults
)
for stat, value in recent15PercentResult.items():
statistics["recent_15%_" + stat] = value
# Although we have added lots of protection in the computePartialResultStatistics code, one last hedge against any NaN or infinity values coming up
# in our statistics
for key in statistics.keys():
if math.isnan(statistics[key]) or math.isinf(statistics[key]):
statistics[key] = 0
return statistics
def convertResultsToTrials(self, hyperparameterSpace, results):
trials = hyperopt.Trials()
for resultIndex, result in enumerate(results):
data = {
"book_time": datetime.datetime.now(),
"exp_key": None,
"misc": {
"cmd": ("domain_attachment", "FMinIter_Domain"),
"idxs": {},
"tid": resultIndex,
"vals": {},
"workdir": None,
},
"owner": None,
"refresh_time": datetime.datetime.now(),
"result": {"loss": result["loss"], "status": result["status"]},
"spec": None,
"state": 2,
"tid": resultIndex,
"version": 0,
}
for param in Hyperparameter(hyperparameterSpace).getFlatParameters():
value = result[param.name]
if value not in ("", None):
if "enum" in param.config:
value = param.config["enum"].index(value)
data["misc"]["idxs"][param.hyperoptVariableName] = [resultIndex]
data["misc"]["vals"][param.hyperoptVariableName] = [value]
else:
data["misc"]["idxs"][param.hyperoptVariableName] = []
data["misc"]["vals"][param.hyperoptVariableName] = []
trials.insert_trial_doc(data)
return trials
def convertTrialsToResults(self, hyperparameterSpace, trials):
results = []
for trialIndex, trial in enumerate(trials.trials):
data = {
"trial": trialIndex,
"status": trial["result"]["status"],
"loss": trial["result"]["loss"],
"log": "",
"time": abs(
(trial["book_time"] - trial["refresh_time"]).total_seconds()
),
}
params = trial["misc"]["vals"]
for param in Hyperparameter(hyperparameterSpace).getFlatParameters():
key = param.hyperoptVariableName
if len(params[key]) == 1:
value = params[key][0]
if "enum" in param.config:
value = param.config["enum"][value]
data[param.name] = value
else:
data[param.name] = ""
results.append(data)
return results
def suggest(new_ids, domain, trials, seed):
optimizer = ATPEOptimizer()
# Convert the PyLL domain back into a descriptive form of hyperparameter space
hyperparameterConfig = Hyperparameter.createHyperparameterConfigForHyperoptDomain(
domain
)
results = optimizer.convertTrialsToResults(hyperparameterConfig, trials)
# If there is a loss value that is negative, then we must increment the values so
# they are all positive. This is because ATPE has been optimized only for positive
    # loss values.
if len(results) > 0:
minVal = min(
[result["loss"] for result in results if result["loss"] is not None]
)
if minVal < 0:
for result in results:
if result["loss"] is not None:
result["loss"] = result["loss"] - minVal + 0.1
hyperparameters = Hyperparameter(hyperparameterConfig)
rval = []
for new_id in new_ids:
parameters = optimizer.recommendNextParameters(
hyperparameterConfig, results, currentTrials=[]
)
flatParameters = hyperparameters.convertToFlatValues(parameters)
rval_results = [domain.new_result()]
rval_miscs = [
dict(
tid=new_id,
cmd=domain.cmd,
workdir=domain.workdir,
idxs={key: [0] for key in flatParameters},
vals={key: [flatParameters[key]] for key in flatParameters},
)
]
rval.extend(trials.new_trial_docs([new_id], [None], rval_results, rval_miscs))
return rval
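# Usage sketch (illustration only, not part of this module): ATPE plugs into
# fmin like any other suggest function. Assumes hyperopt's fmin/hp APIs and the
# optional scikit-learn/lightgbm dependencies that ATPE's internal models need.
if __name__ == "__main__":
    from hyperopt import fmin, hp
    best = fmin(
        fn=lambda x: (x - 2.0) ** 2,
        space=hp.uniform("x", -10, 10),
        algo=suggest,  # the module-level suggest defined above
        max_evals=25,
    )
    print(best)  # e.g. a dict like {'x': 2.03}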
| 67,630 | 40.721777 | 202 |
py
|
hyperopt
|
hyperopt-master/hyperopt/early_stop.py
|
import logging
logger = logging.getLogger(__name__)
def no_progress_loss(iteration_stop_count=20, percent_increase=0.0):
"""
    Stop function that will stop the search after `iteration_stop_count` iterations
    if the loss doesn't improve.
    Parameters
    ----------
    iteration_stop_count: int
        search will stop if the loss doesn't improve after this number of iterations
    percent_increase: float
        allow this percentage of variation within iteration_stop_count.
        Early stopping is only triggered if the loss fails to improve by more than
        this percentage over iteration_stop_count rounds
"""
def stop_fn(trials, best_loss=None, iteration_no_progress=0):
new_loss = trials.trials[len(trials.trials) - 1]["result"]["loss"]
if best_loss is None:
return False, [new_loss, iteration_no_progress + 1]
best_loss_threshold = best_loss - abs(best_loss * (percent_increase / 100.0))
if new_loss is None or new_loss < best_loss_threshold:
best_loss = new_loss
iteration_no_progress = 0
else:
iteration_no_progress += 1
logger.debug(
"No progress made: %d iteration on %d. best_loss=%.2f, best_loss_threshold=%.2f, new_loss=%.2f"
% (
iteration_no_progress,
iteration_stop_count,
best_loss,
best_loss_threshold,
new_loss,
)
)
return (
iteration_no_progress >= iteration_stop_count,
[best_loss, iteration_no_progress],
)
return stop_fn
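# Usage sketch (illustration only): passing no_progress_loss to fmin so the
# search halts once 10 consecutive trials fail to improve on the best loss.
# Assumes hyperopt's fmin/hp/tpe APIs.
if __name__ == "__main__":
    from hyperopt import fmin, hp, tpe
    best = fmin(
        fn=lambda x: (x - 3.0) ** 2,
        space=hp.uniform("x", -10, 10),
        algo=tpe.suggest,
        max_evals=500,
        early_stop_fn=no_progress_loss(iteration_stop_count=10),
    )
    print(best)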
| 1,637 | 33.851064 | 111 |
py
|
hyperopt
|
hyperopt-master/hyperopt/std_out_err_redirect_tqdm.py
|
"""Redirecting writing to tqdm (the progressbar).
See here: https://github.com/tqdm/tqdm#redirecting-writing
"""
import contextlib
import sys
from tqdm import tqdm
class DummyTqdmFile:
"""Dummy file-like that will write to tqdm."""
file = None
def __init__(self, file):
self.file = file
def write(self, x):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
tqdm.write(x, file=self.file)
def flush(self):
return getattr(self.file, "flush", lambda: None)()
def close(self):
return getattr(self.file, "close", lambda: None)()
def isatty(self):
return getattr(self.file, "isatty", lambda: False)()
@contextlib.contextmanager
def std_out_err_redirect_tqdm():
orig_out_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
yield orig_out_err[0]
# Relay exceptions
except Exception as exc:
raise exc
# Always restore sys.stdout/err if necessary
finally:
sys.stdout, sys.stderr = orig_out_err
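# Usage sketch (illustration only): wrap a loop so that print() goes through
# tqdm.write() and does not tear the progress bar apart. Only this module and
# tqdm (already imported above) are assumed.
if __name__ == "__main__":
    import time
    with std_out_err_redirect_tqdm() as orig_stdout:
        for i in tqdm(range(3), file=orig_stdout, dynamic_ncols=True):
            print("step %d: printing is safe while the bar is active" % i)
            time.sleep(0.1)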
| 1,094 | 22.804348 | 65 |
py
|
hyperopt
|
hyperopt-master/hyperopt/__init__.py
|
from .base import STATUS_STRINGS
from .base import STATUS_NEW
from .base import STATUS_RUNNING
from .base import STATUS_SUSPENDED
from .base import STATUS_OK
from .base import STATUS_FAIL
from .base import JOB_STATES
from .base import JOB_STATE_NEW
from .base import JOB_STATE_RUNNING
from .base import JOB_STATE_DONE
from .base import JOB_STATE_ERROR
from .base import Ctrl
from .base import Trials
from .base import trials_from_docs
from .base import Domain
from .fmin import fmin
from .fmin import fmin_pass_expr_memo_ctrl
from .fmin import FMinIter
from .fmin import partial
from .fmin import space_eval
# -- syntactic sugar
from . import hp
# -- exceptions
from . import exceptions
# -- Import built-in optimization algorithms
from . import rand
from . import tpe
from . import atpe
from . import mix
from . import anneal
# -- spark extension
from .spark import SparkTrials
__version__ = "0.2.7"
| 909 | 20.666667 | 44 |
py
|
hyperopt
|
hyperopt-master/hyperopt/fmin.py
|
from future import standard_library
import functools
import inspect
import logging
import os
import sys
import time
from timeit import default_timer as timer
import numpy as np
from hyperopt import tpe, exceptions
from hyperopt.base import validate_timeout, validate_loss_threshold
from . import pyll
from .utils import coarse_utcnow
from . import base
from . import progress
standard_library.install_aliases()
logger = logging.getLogger(__name__)
try:
import cloudpickle as pickler
except Exception as e:
logger.info(
'Failed to load cloudpickle, try installing cloudpickle via "pip install '
'cloudpickle" for enhanced pickling support.'
)
import pickle as pickler
def generate_trial(tid, space):
variables = space.keys()
idxs = {v: [tid] for v in variables}
vals = {k: [v] for k, v in space.items()}
return {
"state": base.JOB_STATE_NEW,
"tid": tid,
"spec": None,
"result": {"status": "new"},
"misc": {
"tid": tid,
"cmd": ("domain_attachment", "FMinIter_Domain"),
"workdir": None,
"idxs": idxs,
"vals": vals,
},
"exp_key": None,
"owner": None,
"version": 0,
"book_time": None,
"refresh_time": None,
}
def generate_trials_to_calculate(points):
"""
Function that generates trials to be evaluated from list of points
:param points: List of points to be inserted in trials object in form of
dictionary with variable names as keys and variable values as dict
values. Example value:
[{'x': 0.0, 'y': 0.0}, {'x': 1.0, 'y': 1.0}]
:return: object of class base.Trials() with points which will be calculated
before optimisation start if passed to fmin().
"""
trials = base.Trials()
new_trials = [generate_trial(tid, x) for tid, x in enumerate(points)]
trials.insert_trial_docs(new_trials)
return trials
def fmin_pass_expr_memo_ctrl(f):
"""
Mark a function as expecting kwargs 'expr', 'memo' and 'ctrl' from
hyperopt.fmin.
expr - the pyll expression of the search space
memo - a partially-filled memo dictionary such that
`rec_eval(expr, memo=memo)` will build the proposed trial point.
ctrl - the Experiment control object (see base.Ctrl)
"""
f.fmin_pass_expr_memo_ctrl = True
return f
def partial(fn, **kwargs):
"""functools.partial work-alike for functions decorated with
fmin_pass_expr_memo_ctrl
"""
rval = functools.partial(fn, **kwargs)
if hasattr(fn, "fmin_pass_expr_memo_ctrl"):
rval.fmin_pass_expr_memo_ctrl = fn.fmin_pass_expr_memo_ctrl
return rval
def __objective_fmin_wrapper(func):
"""
Wrap the objective function on a dict to kwargs
"""
def _objective(kwargs):
return func(**kwargs)
return _objective
class FMinIter:
"""Object for conducting search experiments."""
catch_eval_exceptions = False
pickle_protocol = -1
def __init__(
self,
algo,
domain,
trials,
rstate,
asynchronous=None,
max_queue_len=1,
poll_interval_secs=1.0,
max_evals=sys.maxsize,
timeout=None,
loss_threshold=None,
verbose=False,
show_progressbar=True,
early_stop_fn=None,
trials_save_file="",
):
self.algo = algo
self.domain = domain
self.trials = trials
if not show_progressbar or not verbose:
self.progress_callback = progress.no_progress_callback
elif show_progressbar is True:
self.progress_callback = progress.default_callback
else:
self.progress_callback = show_progressbar
if asynchronous is None:
self.asynchronous = trials.asynchronous
else:
self.asynchronous = asynchronous
self.poll_interval_secs = poll_interval_secs
self.max_queue_len = max_queue_len
self.max_evals = max_evals
self.early_stop_fn = early_stop_fn
self.early_stop_args = []
self.trials_save_file = trials_save_file
self.timeout = timeout
self.loss_threshold = loss_threshold
self.start_time = timer()
self.rstate = rstate
self.verbose = verbose
if self.asynchronous and not hasattr(self.trials, "_spark"):
if "FMinIter_Domain" in trials.attachments:
logger.warning("over-writing old domain trials attachment")
msg = pickler.dumps(domain)
# -- sanity check for unpickling
pickler.loads(msg)
trials.attachments["FMinIter_Domain"] = msg
def serial_evaluate(self, N=-1):
for trial in self.trials._dynamic_trials:
if trial["state"] == base.JOB_STATE_NEW:
trial["state"] = base.JOB_STATE_RUNNING
now = coarse_utcnow()
trial["book_time"] = now
trial["refresh_time"] = now
spec = base.spec_from_misc(trial["misc"])
ctrl = base.Ctrl(self.trials, current_trial=trial)
try:
result = self.domain.evaluate(spec, ctrl)
except Exception as e:
logger.error("job exception: %s" % str(e))
trial["state"] = base.JOB_STATE_ERROR
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
if not self.catch_eval_exceptions:
# -- JOB_STATE_ERROR means this trial
# will be removed from self.trials.trials
# by this refresh call.
self.trials.refresh()
raise
else:
trial["state"] = base.JOB_STATE_DONE
trial["result"] = result
trial["refresh_time"] = coarse_utcnow()
N -= 1
if N == 0:
break
self.trials.refresh()
@property
def is_cancelled(self):
"""
Indicates whether this fmin run has been cancelled. SparkTrials supports cancellation.
"""
if hasattr(self.trials, "_fmin_cancelled"):
if self.trials._fmin_cancelled:
return True
return False
def block_until_done(self):
already_printed = False
if self.asynchronous:
unfinished_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]
def get_queue_len():
return self.trials.count_by_state_unsynced(unfinished_states)
qlen = get_queue_len()
while qlen > 0:
if not already_printed and self.verbose:
logger.info("Waiting for %d jobs to finish ..." % qlen)
already_printed = True
time.sleep(self.poll_interval_secs)
qlen = get_queue_len()
self.trials.refresh()
else:
self.serial_evaluate()
def run(self, N, block_until_done=True):
"""
Run `self.algo` iteratively (use existing `self.trials` to produce the new
ones), update, and repeat
block_until_done means that the process blocks until ALL jobs in
trials are not in running or new state
"""
trials = self.trials
algo = self.algo
n_queued = 0
def get_queue_len():
return self.trials.count_by_state_unsynced(base.JOB_STATE_NEW)
def get_n_done():
return self.trials.count_by_state_unsynced(base.JOB_STATE_DONE)
def get_n_unfinished():
unfinished_states = [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]
return self.trials.count_by_state_unsynced(unfinished_states)
stopped = False
initial_n_done = get_n_done()
with self.progress_callback(
initial=initial_n_done, total=self.max_evals
) as progress_ctx:
all_trials_complete = False
best_loss = float("inf")
while (
# more run to Q || ( block_flag & trials not done )
(n_queued < N or (block_until_done and not all_trials_complete))
# no timeout || < current last time
and (self.timeout is None or (timer() - self.start_time) < self.timeout)
# no loss_threshold || < current best_loss
and (self.loss_threshold is None or best_loss >= self.loss_threshold)
):
qlen = get_queue_len()
while (
qlen < self.max_queue_len and n_queued < N and not self.is_cancelled
):
n_to_enqueue = min(self.max_queue_len - qlen, N - n_queued)
# get ids for next trials to enqueue
new_ids = trials.new_trial_ids(n_to_enqueue)
self.trials.refresh()
# Based on existing trials and the domain, use `algo` to probe in
# new hp points. Save the results of those inspections into
# `new_trials`. This is the core of `run`, all the rest is just
# processes orchestration
new_trials = algo(
new_ids, self.domain, trials, self.rstate.integers(2**31 - 1)
)
assert len(new_ids) >= len(new_trials)
if len(new_trials):
self.trials.insert_trial_docs(new_trials)
self.trials.refresh()
n_queued += len(new_trials)
qlen = get_queue_len()
else:
stopped = True
break
if self.is_cancelled:
break
if self.asynchronous:
# -- wait for workers to fill in the trials
time.sleep(self.poll_interval_secs)
else:
# -- loop over trials and do the jobs directly
self.serial_evaluate()
self.trials.refresh()
if self.trials_save_file != "":
pickler.dump(self.trials, open(self.trials_save_file, "wb"))
if self.early_stop_fn is not None:
stop, kwargs = self.early_stop_fn(
self.trials, *self.early_stop_args
)
self.early_stop_args = kwargs
if stop:
                        logger.info(
                            "Early stop triggered. Stopping iterations as condition is reached."
                        )
stopped = True
# update progress bar with the min loss among trials with status ok
losses = [
loss
for loss in self.trials.losses()
if loss is not None and not np.isnan(loss)
]
if losses:
best_loss = min(losses)
progress_ctx.postfix = "best loss: " + str(best_loss)
n_unfinished = get_n_unfinished()
if n_unfinished == 0:
all_trials_complete = True
n_done = get_n_done()
n_done_this_iteration = n_done - initial_n_done
if n_done_this_iteration > 0:
progress_ctx.update(n_done_this_iteration)
initial_n_done = n_done
if stopped:
break
if block_until_done:
self.block_until_done()
self.trials.refresh()
logger.info("Queue empty, exiting run.")
else:
qlen = get_queue_len()
if qlen:
msg = "Exiting run, not waiting for %d jobs." % qlen
logger.info(msg)
def __iter__(self):
return self
def __next__(self):
self.run(1, block_until_done=self.asynchronous)
if self.early_stop_fn is not None:
stop, kwargs = self.early_stop_fn(self.trials, *self.early_stop_args)
self.early_stop_args = kwargs
if stop:
raise StopIteration()
if len(self.trials) >= self.max_evals:
raise StopIteration()
return self.trials
def exhaust(self):
n_done = len(self.trials)
self.run(self.max_evals - n_done, block_until_done=self.asynchronous)
self.trials.refresh()
return self
def fmin(
fn,
space,
algo=None,
max_evals=None,
timeout=None,
loss_threshold=None,
trials=None,
rstate=None,
allow_trials_fmin=True,
pass_expr_memo_ctrl=None,
catch_eval_exceptions=False,
verbose=True,
return_argmin=True,
points_to_evaluate=None,
max_queue_len=1,
show_progressbar=True,
early_stop_fn=None,
trials_save_file="",
):
"""Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
`trials`
Parameters
----------
fn : callable (trial point -> loss)
This function will be called with a value generated from `space`
as the first and possibly only argument. It can return either
a scalar-valued loss, or a dictionary. A returned dictionary must
contain a 'status' key with a value from `STATUS_STRINGS`, must
contain a 'loss' key if the status is `STATUS_OK`. Particular
optimization algorithms may look for other keys as well. An
optional sub-dictionary associated with an 'attachments' key will
        be removed by fmin; its contents will be available via
`trials.trial_attachments`. The rest (usually all) of the returned
dictionary will be stored and available later as some 'result'
sub-dictionary within `trials.trials`.
space : hyperopt.pyll.Apply node or "annotated"
The set of possible arguments to `fn` is the set of objects
that could be created with non-zero probability by drawing randomly
        from this stochastic program involving hp_<xxx> nodes
(see `hyperopt.hp` and `hyperopt.pyll_utils`).
If set to "annotated", will read space using type hint in fn. Ex:
(`def fn(x: hp.uniform("x", -1, 1)): return x`)
algo : search algorithm
This object, such as `hyperopt.rand.suggest` and
`hyperopt.tpe.suggest` provides logic for sequential search of the
hyperparameter space.
max_evals : int
Allow up to this many function evaluations before returning.
timeout : None or int, default None
Limits search time by parametrized number of seconds.
If None, then the search process has no time constraint.
loss_threshold : None or double, default None
Limits search time when minimal loss reduced to certain amount.
If None, then the search process has no constraint on the loss,
and will stop based on other parameters, e.g. `max_evals`, `timeout`
trials : None or base.Trials (or subclass)
Storage for completed, ongoing, and scheduled evaluation points. If
None, then a temporary `base.Trials` instance will be created. If
a trials object, then that trials object will be affected by
side-effect of this call.
rstate : numpy.random.Generator, default numpy.random or `$HYPEROPT_FMIN_SEED`
Each call to `algo` requires a seed value, which should be different
on each call. This object is used to draw these seeds via `randint`.
The default rstate is
`numpy.random.default_rng(int(env['HYPEROPT_FMIN_SEED']))`
if the `HYPEROPT_FMIN_SEED` environment variable is set to a non-empty
        string, otherwise a fresh `numpy.random.default_rng()` generator is used.
verbose : bool
Print out some information to stdout during search. If False, disable
        the progress bar regardless of the show_progressbar argument
allow_trials_fmin : bool, default True
If the `trials` argument
pass_expr_memo_ctrl : bool, default False
If set to True, `fn` will be called in a different more low-level
way: it will receive raw hyperparameters, a partially-populated
`memo`, and a Ctrl object for communication with this Trials
object.
return_argmin : bool, default True
If set to False, this function returns nothing, which can be useful
for example if it is expected that `len(trials)` may be zero after
fmin, and therefore `trials.argmin` would be undefined.
points_to_evaluate : list, default None
Only works if trials=None. If points_to_evaluate equals None then the
trials are evaluated normally. If list of dicts is passed then
given points are evaluated before optimisation starts, so the overall
number of optimisation steps is len(points_to_evaluate) + max_evals.
Elements of this list must be in a form of a dictionary with variable
names as keys and variable values as dict values. Example
points_to_evaluate value is [{'x': 0.0, 'y': 0.0}, {'x': 1.0, 'y': 2.0}]
max_queue_len : integer, default 1
        Sets the maximum number of queued (not yet evaluated) trials. Increasing this
        value can slightly speed up parallel evaluations, which sometimes lag
        on suggesting a new trial.
show_progressbar : bool or context manager, default True (or False if verbose is False).
Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
early_stop_fn: callable ((result, *args) -> (Boolean, *args)).
Called after every run with the result of the run and the values returned by the function previously.
        Stop the search if the function returns true.
Default None.
trials_save_file: str, default ""
Optional file name to save the trials object to every iteration.
If specified and the file already exists, will load from this file when
trials=None instead of creating a new base.Trials object
Returns
-------
argmin : dictionary
If return_argmin is True returns `trials.argmin` which is a dictionary. Otherwise
        this function returns the result of `hyperopt.space_eval(space, trials.argmin)` if there
        were successful trials. This object shares the same structure as the space passed.
        If there were no successful trials, it returns None.
"""
if algo is None:
algo = tpe.suggest
logger.warning("TPE is being used as the default algorithm.")
if max_evals is None:
max_evals = sys.maxsize
if rstate is None:
env_rseed = os.environ.get("HYPEROPT_FMIN_SEED", "")
if env_rseed:
rstate = np.random.default_rng(int(env_rseed))
else:
rstate = np.random.default_rng()
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
if space == "annotated":
# Read space from objective fn
space = inspect.getfullargspec(fn).annotations
# Validate space
for param, hp_func in space.items():
if not isinstance(hp_func, pyll.base.Apply):
raise exceptions.InvalidAnnotatedParameter(
'When using `space="annotated"`, please annotate the '
"objective function arguments with a `pyll.base.Apply` "
"subclass. See example in `fmin` docstring"
)
# Change fn to accept a dict-like argument
fn = __objective_fmin_wrapper(fn)
if allow_trials_fmin and hasattr(trials, "fmin"):
return trials.fmin(
fn,
space,
algo=algo,
max_evals=max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
rstate=rstate,
pass_expr_memo_ctrl=pass_expr_memo_ctrl,
verbose=verbose,
catch_eval_exceptions=catch_eval_exceptions,
return_argmin=return_argmin,
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file=trials_save_file,
)
if trials is None:
if os.path.exists(trials_save_file):
trials = pickler.load(open(trials_save_file, "rb"))
elif points_to_evaluate is None:
trials = base.Trials()
else:
assert type(points_to_evaluate) == list
trials = generate_trials_to_calculate(points_to_evaluate)
domain = base.Domain(fn, space, pass_expr_memo_ctrl=pass_expr_memo_ctrl)
rval = FMinIter(
algo,
domain,
trials,
max_evals=max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
rstate=rstate,
verbose=verbose,
max_queue_len=max_queue_len,
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file=trials_save_file,
)
rval.catch_eval_exceptions = catch_eval_exceptions
# next line is where the fmin is actually executed
rval.exhaust()
if return_argmin:
if len(trials.trials) == 0:
raise Exception(
"There are no evaluation tasks, cannot return argmin of task losses."
)
return trials.argmin
if len(trials) > 0:
# Only if there are some successful trail runs, return the best point in
# the evaluation space
return space_eval(space, trials.argmin)
return None
def space_eval(space, hp_assignment):
"""Compute a point in a search space from a hyperparameter assignment.
Parameters:
-----------
space - a pyll graph involving hp nodes (see `pyll_utils`).
hp_assignment - a dictionary mapping hp node labels to values.
"""
space = pyll.as_apply(space)
nodes = pyll.toposort(space)
memo = {}
for node in nodes:
if node.name == "hyperopt_param":
label = node.arg["label"].eval()
if label in hp_assignment:
memo[node] = hp_assignment[label]
rval = pyll.rec_eval(space, memo=memo)
return rval
# -- flake8 doesn't like blank last line
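# Usage sketch (illustration only): a minimal fmin call over a dict-valued
# search space, seeding the search with two hand-picked points through
# points_to_evaluate and returning a result dict from the objective. Assumes
# the hp/STATUS_OK exports of the installed hyperopt package.
if __name__ == "__main__":
    from hyperopt import STATUS_OK, hp
    def objective(params):
        x, y = params["x"], params["y"]
        return {"loss": (x - 1.0) ** 2 + (y + 2.0) ** 2, "status": STATUS_OK}
    best = fmin(
        fn=objective,
        space={"x": hp.uniform("x", -5, 5), "y": hp.uniform("y", -5, 5)},
        algo=tpe.suggest,
        max_evals=50,
        points_to_evaluate=[{"x": 0.0, "y": 0.0}, {"x": 1.0, "y": -2.0}],
    )
    print(best)  # e.g. a dict like {'x': 0.97, 'y': -1.98}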
| 22,699 | 35.495177 | 109 |
py
|
hyperopt
|
hyperopt-master/hyperopt/algobase.py
|
""" Support code for new-style search algorithms.
"""
import copy
from collections import deque
import numpy as np
from . import pyll
from .base import miscs_update_idxs_vals
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
class ExprEvaluator:
def __init__(self, expr, deepcopy_inputs=False, max_program_len=None, memo_gc=True):
"""
Parameters
----------
expr - pyll Apply instance to be evaluated
deepcopy_inputs - deepcopy inputs to every node prior to calling that
node's function on those inputs. If this leads to a different
return value, then some function (XXX add more complete DebugMode
functionality) in your graph is modifying its inputs and causing
mis-calculation. XXX: This is not a fully-functional DebugMode
because if the offender happens on account of the toposort order
to be the last user of said input, then it will not be detected as
a potential problem.
max_program_len : int (default pyll.base.DEFAULT_MAX_PROGRAM_LEN)
If more than this many nodes are evaluated in the course of
evaluating `expr`, then evaluation is aborted under the assumption
that an infinite recursion is underway.
memo_gc : bool
If True, values computed for apply nodes within `expr` may be
cleared during computation. The bookkeeping required to do this
takes a bit of extra time, but usually no big deal.
"""
self.expr = pyll.as_apply(expr)
if deepcopy_inputs not in (0, 1, False, True):
# -- I've been calling rec_eval(expr, memo) by accident a few times
# this error would have been appreciated.
#
# TODO: Good candidate for Py3K keyword-only argument
raise ValueError("deepcopy_inputs should be bool", deepcopy_inputs)
self.deepcopy_inputs = deepcopy_inputs
if max_program_len is None:
self.max_program_len = pyll.base.DEFAULT_MAX_PROGRAM_LEN
else:
self.max_program_len = max_program_len
self.memo_gc = memo_gc
def eval_nodes(self, memo=None):
if memo is None:
memo = {}
else:
memo = dict(memo)
# TODO: optimize dfs to not recurse past the items in memo
# this is especially important for evaluating Lambdas
# which cause rec_eval to recurse
#
# N.B. that Lambdas may expand the graph during the evaluation
        # so that this iteration may be incomplete
if self.memo_gc:
clients = self.clients = {}
for aa in pyll.dfs(self.expr):
clients.setdefault(aa, set())
for ii in aa.inputs():
clients.setdefault(ii, set()).add(aa)
todo = deque([self.expr])
while todo:
if len(todo) > self.max_program_len:
raise RuntimeError("Probably infinite loop in document")
node = todo.pop()
if node in memo:
# -- we've already computed this, move on.
continue
# -- different kinds of nodes are treated differently:
if node.name == "switch":
waiting_on = self.on_switch(memo, node)
if waiting_on is None:
continue
elif isinstance(node, pyll.Literal):
# -- constants go straight into the memo
self.set_in_memo(memo, node, node.obj)
continue
else:
# -- normal instruction-type nodes have inputs
waiting_on = [v for v in node.inputs() if v not in memo]
if waiting_on:
# -- Necessary inputs have yet to be evaluated.
# push the node back in the queue, along with the
# inputs it still needs
todo.append(node)
todo.extend(waiting_on)
else:
rval = self.on_node(memo, node)
if isinstance(rval, pyll.Apply):
# -- if an instruction returns a Pyll apply node
# it means evaluate that too. Lambdas do this.
#
# XXX: consider if it is desirable, efficient, buggy
# etc. to keep using the same memo dictionary.
# I think it is OK because by using the same
# dictionary all of the nodes are stored in the memo
# so all keys are preserved until the entire outer
# function returns
evaluator = self.__class__(
                        rval, self.deepcopy_inputs, self.max_program_len, self.memo_gc
)
foo = evaluator(memo)
self.set_in_memo(memo, node, foo)
else:
self.set_in_memo(memo, node, rval)
return memo
def set_in_memo(self, memo, k, v):
"""Assign memo[k] = v
        This implementation optionally drops references to the arguments
"clients" required to compute apply-node `k`, which allows those
objects to be garbage-collected. This feature is enabled by
`self.memo_gc`.
"""
if self.memo_gc:
assert v is not pyll.base.GarbageCollected
memo[k] = v
for ii in k.inputs():
# -- if all clients of ii are already in the memo
# then we can free memo[ii] by replacing it
# with a dummy symbol
if all(iic in memo for iic in self.clients[ii]):
memo[ii] = pyll.base.GarbageCollected
else:
memo[k] = v
def on_switch(self, memo, node):
# -- pyll.base.switch is a control-flow expression.
#
        # Its signature is
# int, option0, option1, option2, ..., optionN
#
# The semantics of a switch node are to only evaluate the option
# corresponding to the value of the leading integer. (Think of
# a switch block in the C language.)
#
# This is a helper-function to self.eval_nodes. It returns None,
# or a list of apply-nodes required to evaluate the given switch
# node.
#
# When it returns None, the memo has been updated so that
# memo[`node`] has been assigned the computed value for the given
# switch node.
#
switch_i_var = node.pos_args[0]
if switch_i_var in memo:
switch_i = memo[switch_i_var]
try:
int(switch_i)
            except Exception:
raise TypeError("switch argument was", switch_i)
if switch_i != int(switch_i) or switch_i < 0:
raise ValueError("switch pos must be positive int", switch_i)
rval_var = node.pos_args[switch_i + 1]
if rval_var in memo:
self.set_in_memo(memo, node, memo[rval_var])
return
else:
return [rval_var]
else:
return [switch_i_var]
def on_node(self, memo, node):
# -- Retrieve computed arguments of apply node
args = _args = [memo[v] for v in node.pos_args]
kwargs = _kwargs = {k: memo[v] for (k, v) in node.named_args}
if self.memo_gc:
# -- Ensure no computed argument has been (accidentally) freed for
# garbage-collection.
for aa in args + list(kwargs.values()):
assert aa is not pyll.base.GarbageCollected
if self.deepcopy_inputs:
# -- I think this is supposed to be skipped if node.pure == True
# because that attribute is supposed to mark the node as having
# no side-effects that affect expression-evaluation.
#
# HOWEVER That has not been tested in a while, and it's hard to
# verify (with e.g. unit tests) that a node marked "pure" isn't
# lying. So we hereby ignore the `pure` attribute and copy
# everything to be on the safe side.
args = copy.deepcopy(_args)
kwargs = copy.deepcopy(_kwargs)
return pyll.scope._impls[node.name](*args, **kwargs)
class SuggestAlgo(ExprEvaluator):
"""Add constructor and call signature to match suggest()
Also, detect when on_node is handling a hyperparameter, and
delegate that to an `on_node_hyperparameter` method. This method
must be implemented by a derived class.
"""
def __init__(self, domain, trials, seed):
ExprEvaluator.__init__(self, domain.s_idxs_vals)
self.domain = domain
self.trials = trials
self.label_by_node = {
n: l for l, n in list(self.domain.vh.vals_by_label().items())
}
self._seed = seed
self.rng = np.random.default_rng(seed)
def __call__(self, new_id):
self.rng = np.random.default_rng(self._seed + new_id)
memo = self.eval_nodes(
memo={self.domain.s_new_ids: [new_id], self.domain.s_rng: self.rng}
)
idxs, vals = memo[self.expr]
new_result = self.domain.new_result()
new_misc = dict(tid=new_id, cmd=self.domain.cmd, workdir=self.domain.workdir)
miscs_update_idxs_vals([new_misc], idxs, vals)
rval = self.trials.new_trial_docs([new_id], [None], [new_result], [new_misc])
return rval
def on_node(self, memo, node):
if node in self.label_by_node:
label = self.label_by_node[node]
return self.on_node_hyperparameter(memo, node, label)
else:
return ExprEvaluator.on_node(self, memo, node)
def batch(self, new_ids):
new_ids = list(new_ids)
self.rng = np.random.default_rng([self._seed] + new_ids)
memo = self.eval_nodes(
memo={self.domain.s_new_ids: new_ids, self.domain.s_rng: self.rng}
)
idxs, vals = memo[self.expr]
return idxs, vals
# -- flake-8 abhors blank line EOF
| 10,344 | 39.096899 | 88 |
py
|
hyperopt
|
hyperopt-master/hyperopt/anneal.py
|
# TODO: add this to documentation
"""
Annealing algorithm for hyperopt
Annealing is a simple but effective variant on random search that
takes some advantage of a smooth response surface.
The simple (but not overly simple) code of simulated annealing makes this file
a good starting point for implementing new search algorithms.
"""
from past.utils import old_div
import logging
import numpy as np
from hyperopt.pyll.base import bincount
from .pyll.stochastic import (
categorical,
normal,
lognormal,
qnormal,
qlognormal,
uniform,
loguniform,
quniform,
qloguniform,
)
from .base import miscs_to_idxs_vals
from .algobase import SuggestAlgo, ExprEvaluator
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
logger = logging.getLogger(__name__)
class AnnealingAlgo(SuggestAlgo):
"""
This simple annealing algorithm begins by sampling from the prior,
but tends over time to sample from points closer and closer to the best
ones observed.
In addition to the value of this algorithm as a baseline optimization
strategy, it is a simple starting point for implementing new algorithms.
# The Annealing Algorithm
The annealing algorithm is to choose one of the previous trial points
as a starting point, and then to sample each hyperparameter from a similar
distribution to the one specified in the prior, but whose density is more
concentrated around the trial point we selected.
This algorithm is a simple variation on random search that leverages
smoothness in the response surface. The annealing rate is not adaptive.
## Choosing a Best Trial
The algorithm formalizes the notion of "one of the best trials" by
sampling a position from a geometric distribution whose mean is the
`avg_best_idx` parameter. The "best trial" is the trial thus selected
from the set of all trials (`self.trials`).
It may happen that in the process of ancestral sampling, we may find that
the best trial at some ancestral point did not use the hyperparameter we
need to draw. In such a case, this algorithm will draw a new "runner up"
best trial, and use that one as if it had been chosen as the best trial.
The set of best trials, and runner-up best trials obtained during the
process of choosing all hyperparameters is kept sorted by the validation
loss, and at each point where the best trial does not define a
required hyperparameter value, we actually go through all the list of
runners-up too, before giving up and adding a new runner-up trial.
## Concentrating Prior Distributions
To sample a hyperparameter X within a search space, we look at
what kind of hyperparameter it is (what kind of distribution it's from)
and the previous successful values of that hyperparameter, and make
a new proposal for that hyperparameter independently of other
hyperparameters (except technically any choice nodes that led us to use
this current hyperparameter in the first place).
    For example, if X is a uniform-distributed hyperparameter drawn from
`U(l, h)`, we look at the value `x` of the hyperparameter in the selected
trial, and draw from a new uniform density `U(x - w/2, x + w/2)`, where w
is related to the initial range, and the number of observations we have for
X so far. If W is the initial range, and T is the number of observations
we have, then w = W / (1 + T * shrink_coef). If the resulting range would
extend either below l or above h, we shift it to fit into the original
bounds.
"""
def __init__(self, domain, trials, seed, avg_best_idx=2.0, shrink_coef=0.1):
"""
Parameters
----------
avg_best_idx: float
Mean of geometric distribution over which trial to explore around,
selecting from trials sorted by score (0 is best)
shrink_coef: float
Rate of reduction in the size of sampling neighborhood as more
points have been explored.
"""
SuggestAlgo.__init__(self, domain, trials, seed=seed)
self.avg_best_idx = avg_best_idx
self.shrink_coef = shrink_coef
doc_by_tid = {}
for doc in trials.trials:
# get either this docs own tid or the one that it's from
tid = doc["tid"]
loss = domain.loss(doc["result"], doc["spec"])
# -- associate infinite loss to new/running/failed jobs
loss = float("inf" if loss is None else loss)
doc_by_tid[tid] = (doc, loss)
self.tid_docs_losses = sorted(doc_by_tid.items())
self.tids = np.asarray([t for (t, (d, l)) in self.tid_docs_losses])
self.losses = np.asarray([l for (t, (d, l)) in self.tid_docs_losses])
self.tid_losses_dct = dict(list(zip(self.tids, self.losses)))
# node_tids: dict from hp label -> trial ids (tids) using that hyperparam
# node_vals: dict from hp label -> values taken by that hyperparam
self.node_tids, self.node_vals = miscs_to_idxs_vals(
[d["misc"] for (tid, (d, l)) in self.tid_docs_losses],
keys=list(domain.params.keys()),
)
self.best_tids = []
def shrinking(self, label):
"""Return fraction of original search width
Parameters
----------
label: string
the name of a hyperparameter
"""
T = len(self.node_vals[label])
return old_div(1.0, (1.0 + T * self.shrink_coef))
def choose_ltv(self, label, size):
"""Returns (loss, tid, val) of best/runner-up trial"""
tids = self.node_tids[label]
vals = self.node_vals[label]
losses = [self.tid_losses_dct[tid] for tid in tids]
if size == 1:
# -- try to return the value corresponding to one of the
# trials that was previously chosen (non-independence
# of hyperparameter values)
# This doesn't really make sense if we're sampling a lot of
# points at a time.
tid_set = set(tids)
for tid in self.best_tids:
if tid in tid_set:
idx = tids.index(tid)
rval = losses[idx], tid, vals[idx]
return rval
# -- choose a new good seed point
good_idx = self.rng.geometric(old_div(1.0, self.avg_best_idx), size=size) - 1
good_idx = np.clip(good_idx, 0, len(tids) - 1).astype("int32")
picks = np.argsort(losses)[good_idx]
picks_loss = np.asarray(losses)[picks]
picks_tids = np.asarray(tids)[picks]
picks_vals = np.asarray(vals)[picks]
if size == 1:
self.best_tids.append(int(picks_tids))
return picks_loss, picks_tids, picks_vals
def on_node_hyperparameter(self, memo, node, label):
"""
Return a new value for one hyperparameter.
Parameters:
-----------
memo - a partially-filled dictionary of node -> list-of-values
for the nodes in a vectorized representation of the
original search space.
node - an Apply instance in the vectorized search space,
which corresponds to a hyperparameter
label - a string, the name of the hyperparameter
Returns: a list with one value in it: the suggested value for this
hyperparameter
Notes
-----
This function works by delegating to self.hp_HPTYPE functions to
handle each of the kinds of hyperparameters in hyperopt.pyll_utils.
Other search algorithms can implement this function without
delegating based on the hyperparameter type, but it's a pattern
I've used a few times so I show it here.
"""
n_observations = len(self.node_vals[label])
if n_observations > 0:
# -- Pick a previous trial on which to base the new sample
size = memo[node.arg["size"]]
loss, tid, val = self.choose_ltv(label, size=size)
try:
handler = getattr(self, "hp_%s" % node.name)
except AttributeError:
raise NotImplementedError("Annealing", node.name)
return handler(memo, node, label, tid, val)
else:
# -- Draw the new sample from the prior
return ExprEvaluator.on_node(self, memo, node)
def hp_uniform(
self,
memo,
node,
label,
tid,
val,
log_scale=False,
pass_q=False,
uniform_like=uniform,
):
"""
Return a new value for a uniform hyperparameter.
Parameters:
-----------
memo - (see on_node_hyperparameter)
node - (see on_node_hyperparameter)
label - (see on_node_hyperparameter)
tid - trial-identifier of the model trial on which to base a new sample
val - the value of this hyperparameter on the model trial
Returns: a list with one value in it: the suggested value for this
hyperparameter
"""
midpt = np.log(val) if log_scale else val
high = memo[node.arg["high"]]
low = memo[node.arg["low"]]
width = (high - low) * self.shrinking(label)
half = 0.5 * width
min_midpt = low + half
max_midpt = high - half
clipped_midpt = np.clip(midpt, min_midpt, max_midpt)
if pass_q:
return uniform_like(
low=clipped_midpt - half,
high=clipped_midpt + half,
rng=self.rng,
q=memo[node.arg["q"]],
size=memo[node.arg["size"]],
)
else:
return uniform_like(
low=clipped_midpt - half,
high=clipped_midpt + half,
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_quniform(self, *args, **kwargs):
return self.hp_uniform(pass_q=True, uniform_like=quniform, *args, **kwargs)
def hp_loguniform(self, *args, **kwargs):
return self.hp_uniform(
log_scale=True, pass_q=False, uniform_like=loguniform, *args, **kwargs
)
def hp_qloguniform(self, *args, **kwargs):
return self.hp_uniform(
log_scale=True, pass_q=True, uniform_like=qloguniform, *args, **kwargs
)
def hp_randint(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
low = memo[node.arg["low"]]
high = memo.get(node.arg["high"])
# if high is None, the domain is [0, low), else it is [low, high)
domain_size = low if high is None else high - low
offset = 0 if high is None else low
val1 = np.atleast_1d(val)
if val1.size:
counts = old_div(
bincount(val1, offset=offset, minlength=domain_size), float(val1.size)
)
else:
counts = np.zeros(domain_size)
prior = self.shrinking(label)
p = (1 - prior) * counts + prior * (old_div(1.0, domain_size))
rval = categorical(p=p, rng=self.rng, size=memo[node.arg["size"]]) + offset
return rval
def hp_categorical(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
size = memo[node.arg["size"]]
if size == 0:
return []
val1 = np.atleast_1d(val)
p = p_orig = np.asarray(memo[node.arg["p"]])
if p.ndim == 2:
if len(p) not in (1, len(val1)):
print(node)
print(p)
print(np.asarray(p).shape)
assert len(p) in (1, len(val1))
else:
assert p.ndim == 1
p = p[np.newaxis, :]
if val1.size:
counts = old_div(np.bincount(val1, minlength=p.size), float(val1.size))
prior = self.shrinking(label)
else:
counts = np.zeros(p.size)
prior = 1.0
new_p = (1 - prior) * counts + prior * p
assert new_p.ndim == 2
rval = categorical(p=new_p, rng=self.rng, size=size)
if p_orig.ndim == 1:
assert len(rval) == 1
return rval[0]
return rval
def hp_normal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return normal(
mu=val,
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_lognormal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return lognormal(
mu=np.log(val),
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_qlognormal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return qlognormal(
# -- prevent log(0) without messing up algo
mu=np.log(1e-16 + val),
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
q=memo[node.arg["q"]],
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_qnormal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return qnormal(
mu=val,
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
q=memo[node.arg["q"]],
rng=self.rng,
size=memo[node.arg["size"]],
)
def suggest(new_ids, domain, trials, seed, *args, **kwargs):
(new_id,) = new_ids
return AnnealingAlgo(domain, trials, seed, *args, **kwargs)(new_id)
def suggest_batch(new_ids, domain, trials, seed, *args, **kwargs):
return AnnealingAlgo(domain, trials, seed, *args, **kwargs).batch(new_ids)
# -- flake-8 abhors blank line EOF
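# Numeric sketch (illustration only) of the neighbourhood-shrinking rule from
# the AnnealingAlgo docstring, w = W / (1 + T * shrink_coef): the sampling
# width around the chosen trial narrows as more observations T accumulate.
# The width W below is hypothetical; shrink_coef is the class default.
if __name__ == "__main__":
    W = 10.0  # e.g. the full range of hp.uniform("x", -5, 5)
    shrink_coef = 0.1
    for T in (0, 5, 20, 100):
        w = W / (1.0 + T * shrink_coef)
        print("T=%3d  ->  sampling width w=%.2f" % (T, w))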
| 14,079 | 34.288221 | 86 |
py
|
hyperopt
|
hyperopt-master/hyperopt/ipy.py
|
"""Utilities for Parallel Model Selection with IPython
Author: James Bergstra <[email protected]>
Licensed: MIT
"""
from time import sleep, time
import numpy as np
from .base import Trials
from .base import Domain
from .base import JOB_STATE_NEW
from .base import JOB_STATE_RUNNING
from .base import JOB_STATE_DONE
from .base import JOB_STATE_ERROR
from .base import spec_from_misc
from .base import Ctrl
from .utils import coarse_utcnow
import sys
print("WARNING: IPythonTrials is not as complete, stable", file=sys.stderr)
print(" or well tested as Trials or MongoTrials.", file=sys.stderr)
class LostEngineError(RuntimeError):
"""An IPEngine disappeared during computation, and a job with it."""
class IPythonTrials(Trials):
def __init__(self, client, job_error_reaction="raise", save_ipy_metadata=True):
self._client = client
self._clientlbv = client.load_balanced_view()
self.job_map = {}
self.job_error_reaction = job_error_reaction
self.save_ipy_metadata = save_ipy_metadata
Trials.__init__(self)
self._testing_fmin_was_called = False
def _insert_trial_docs(self, docs):
rval = [doc["tid"] for doc in docs]
self._dynamic_trials.extend(docs)
return rval
def refresh(self):
job_map = {}
# -- carry over state for active engines
for eid in self._client.ids:
job_map[eid] = self.job_map.pop(eid, (None, None))
# -- deal with lost engines, abandoned promises
for eid, (p, tt) in list(self.job_map.items()):
if self.job_error_reaction == "raise":
raise LostEngineError(p)
elif self.job_error_reaction == "log":
tt["error"] = "LostEngineError (%s)" % str(p)
tt["state"] = JOB_STATE_ERROR
else:
raise ValueError(self.job_error_reaction)
# -- remove completed jobs from job_map
for eid, (p, tt) in list(job_map.items()):
if p is None:
continue
if p.ready():
try:
tt["result"] = p.get()
tt["state"] = JOB_STATE_DONE
job_map[eid] = (None, None)
except Exception as e:
if self.job_error_reaction == "raise":
raise
elif self.job_error_reaction == "log":
tt["error"] = str(e)
tt["state"] = JOB_STATE_ERROR
else:
raise ValueError(self.job_error_reaction)
if self.save_ipy_metadata:
tt["ipy_metadata"] = p.metadata
tt["refresh_time"] = coarse_utcnow()
del job_map[eid]
self.job_map = job_map
Trials.refresh(self)
def fmin(self, fn, space, **kw):
# TODO: all underscore variables are completely unused throughout.
algo = kw.get("algo")
max_evals = kw.get("max_evals")
rstate = kw.get("rstate", None)
_allow_trials_fmin = (True,)
_pass_expr_memo_ctrl = (None,)
_catch_eval_exceptions = (False,)
verbose = kw.get("verbose", 0)
_return_argmin = (True,)
wait = (True,)
pass_expr_memo_ctrl = (None,)
if rstate is None:
            rstate = np.random.default_rng()
# -- used in test_ipy
self._testing_fmin_was_called = True
if pass_expr_memo_ctrl is None:
try:
pass_expr_memo_ctrl = fn.pass_expr_memo_ctrl
except AttributeError:
pass_expr_memo_ctrl = False
domain = Domain(fn, space, None, pass_expr_memo_ctrl=False)
last_print_time = 0
while len(self._dynamic_trials) < max_evals:
self.refresh()
if verbose and last_print_time + 1 < time():
print(
"fmin: %4i/%4i/%4i/%4i %f"
% (
self.count_by_state_unsynced(JOB_STATE_NEW),
self.count_by_state_unsynced(JOB_STATE_RUNNING),
self.count_by_state_unsynced(JOB_STATE_DONE),
self.count_by_state_unsynced(JOB_STATE_ERROR),
min(
[float("inf")] + [l for l in self.losses() if l is not None]
),
)
)
last_print_time = time()
idles = [eid for (eid, (p, tt)) in list(self.job_map.items()) if p is None]
if idles:
new_ids = self.new_trial_ids(len(idles))
new_trials = algo(new_ids, domain, self, rstate.integers(2**31 - 1))
if len(new_trials) == 0:
break
assert len(idles) >= len(new_trials)
for eid, new_trial in zip(idles, new_trials):
now = coarse_utcnow()
new_trial["book_time"] = now
new_trial["refresh_time"] = now
(tid,) = self.insert_trial_docs([new_trial])
promise = call_domain(
domain,
spec_from_misc(new_trial["misc"]),
Ctrl(self, current_trial=new_trial),
new_trial,
self._clientlbv,
eid,
tid,
)
# -- XXX bypassing checks because 'ar'
# is not ok for SONify... but should check
# for all else being SONify
tt = self._dynamic_trials[-1]
assert tt["tid"] == tid
self.job_map[eid] = (promise, tt)
tt["state"] = JOB_STATE_RUNNING
if wait:
if verbose:
print("fmin: Waiting on remaining jobs...")
self.wait(verbose=verbose)
return self.argmin
def wait(self, verbose=False, verbose_print_interval=1.0):
last_print_time = 0
while True:
self.refresh()
if verbose and last_print_time + verbose_print_interval < time():
print(
"fmin: %4i/%4i/%4i/%4i %f"
% (
self.count_by_state_unsynced(JOB_STATE_NEW),
self.count_by_state_unsynced(JOB_STATE_RUNNING),
self.count_by_state_unsynced(JOB_STATE_DONE),
self.count_by_state_unsynced(JOB_STATE_ERROR),
min(
[float("inf")] + [l for l in self.losses() if l is not None]
),
)
)
last_print_time = time()
if self.count_by_state_unsynced(JOB_STATE_NEW):
sleep(1e-1)
continue
if self.count_by_state_unsynced(JOB_STATE_RUNNING):
sleep(1e-1)
continue
break
def __getstate__(self):
rval = dict(self.__dict__)
del rval["_client"]
del rval["_trials"]
del rval["job_map"]
# print rval.keys()
return rval
def __setstate__(self, dct):
self.__dict__ = dct
self.job_map = {}
Trials.refresh(self)
# Monkey patching to allow the apply_async call and response to
# be handled on behalf of the domain.
class IPYAsync:
def __init__(self, asynchronous, domain, rv, eid, tid, ctrl):
self.asynchronous = asynchronous
self.domain = domain
self.rv = rv
self.metadata = self.asynchronous.metadata
self.eid = eid
self.tid = tid
self.ctrl = ctrl
def ready(self):
return self.asynchronous.ready()
def get(self):
if self.asynchronous.successful():
val = self.asynchronous.get()
return self.domain.evaluate_async2(val, self.ctrl)
return self.rv
pass
# @interactive
def call_domain(domain, spec, ctrl, trial, view, eid, tid):
rv = {"loss": None, "status": "fail"}
# TODO: rt unused
rt = coarse_utcnow()
# print "in call domain for spec", str(spec)
promise = None
fn, pyll_rval = domain.evaluate_async(spec, ctrl)
promise = IPYAsync(view.apply_async(fn, pyll_rval), domain, rv, eid, tid, ctrl)
return promise
| 8,500 | 32.868526 | 88 |
py
|
hyperopt
|
hyperopt-master/hyperopt/mix.py
|
import numpy as np
def suggest(new_ids, domain, trials, seed, p_suggest):
"""Return the result of a randomly-chosen suggest function
For example to search by sometimes using random search, sometimes anneal,
and sometimes tpe, type:
fmin(...,
algo=partial(mix.suggest,
p_suggest=[
(.1, rand.suggest),
(.2, anneal.suggest),
(.7, tpe.suggest),]),
)
Parameters
----------
p_suggest: list of (probability, suggest) pairs
Make a suggestion from one of the suggest functions,
in proportion to its corresponding probability.
sum(probabilities) must be [close to] 1.0
"""
rng = np.random.default_rng(seed)
ps, suggests = list(zip(*p_suggest))
assert len(ps) == len(suggests) == len(p_suggest)
if not np.isclose(sum(ps), 1.0):
raise ValueError("Probabilities should sum to 1", ps)
idx = rng.multinomial(n=1, pvals=ps).argmax()
return suggests[idx](new_ids, domain, trials, seed=rng.integers(2**31))
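# Usage sketch (illustration only): a runnable version of the docstring
# example, mixing random search, annealing and TPE in fixed proportions.
# Assumes the sibling hyperopt modules imported below.
if __name__ == "__main__":
    from functools import partial
    from hyperopt import anneal, fmin, hp, rand, tpe
    best = fmin(
        fn=lambda x: x**2,
        space=hp.uniform("x", -1, 1),
        algo=partial(
            suggest,
            p_suggest=[(0.1, rand.suggest), (0.2, anneal.suggest), (0.7, tpe.suggest)],
        ),
        max_evals=30,
    )
    print(best)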
| 1,089 | 30.142857 | 77 |
py
|
hyperopt
|
hyperopt-master/hyperopt/sklearn.py
|
"""Scikit-learn integration.
This class is based on :class:`sklearn.model_selection._search.BaseSearchCV` and
inspired by :class:sklearn.model_selection._search_successive_halving.BaseSuccessiveHalving`.
"""
import numpy as np
from sklearn.model_selection._search import is_classifier
from sklearn.model_selection._search import BaseSearchCV
from sklearn.utils.multiclass import check_classification_targets, unique_labels
from sklearn.utils.validation import check_array
from hyperopt.base import STATUS_OK, Trials
from hyperopt.fmin import fmin
class HyperoptSearchCV(BaseSearchCV):
"""Hyper-parameter search with hyperopt.
Parameters
----------
estimator : estimator object
An object of that type is instantiated for each set of candidate parameters.
This is assumed to implement the ``scikit-learn`` estimator interface. The
estimator needs to provide a ``score`` method or ``scoring`` must be passed.
space : hyperopt.pyll.Apply node or "annotated"
The set of possible arguments to `fn` is the set of objects
that could be created with non-zero probability by drawing randomly
        from this stochastic program involving hp_<xxx> nodes
(see `hyperopt.hp` and `hyperopt.pyll_utils`).
If set to "annotated", will read space using type hint in fn. Ex:
(`def fn(x: hp.uniform("x", -1, 1)): return x`)
max_evals : int
Allow up to this many function evaluations before returning.
trials : None or base.Trials (or subclass)
Storage for completed, ongoing, and scheduled evaluation points. If
None, then a temporary `base.Trials` instance will be created. If
a trials object, then that trials object will be affected by
side-effect of this call.
algo : search algorithm
This object, such as `hyperopt.rand.suggest` and
`hyperopt.tpe.suggest` provides logic for sequential search of the
hyperparameter space.
warm_start : bool, optional (default False)
When set to True, reuse the solution of the previous ``fit`` call and add
iterations to the trials object. Otherwise, reset the ``trials``. ``max_evals``
refers to the total number of iterations in the ``Trials`` object, so use ``set_params``
to increase the total number.
scoring : str or callable, optional (default None)
Strategy to evaluate the performance of the cross-validated model on the test set.
n_jobs : int, optional (default None)
Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
refit : bool, optional (default True)
Refit an estimator using the best found parameters on the whole dataset.
cv : int, cross-validation generator or an iterable, optional (default None)
Determines the cross-validation splitting strategy.
verbose : int, optional (default 0)
Controls the verbosity.
pre_dispatch : int or str, optional (default "2*n_jobs")
Controls the number of jobs that get dispatched during parallel execution. Reducing this
number can be useful to avoid high memory usage.
    random_state : int, numpy.random.Generator instance or None, optional (default None)
Pseudo random number generator state used for random uniform sampling from lists
instead of ``scipy.stats`` distributions.
error_score : 'raise' or numeric, optional (default np.nan)
Value to assign to the score if an error occurs during fitting.
return_train_score : bool, optional (default False)
If ``False``, the ``cv_results_`` attribute will not include training scores.
Attributes
----------
trials_ : Trials
The trials object.
"""
_required_parameters = ["estimator", "space", "max_evals"]
def __init__(
self,
estimator,
space,
max_evals,
trials=None,
algo=None,
warm_start=False,
scoring=None,
n_jobs=None,
refit=True,
cv=None,
verbose=0,
pre_dispatch="2*n_jobs",
random_state=None,
error_score=np.nan,
return_train_score=False,
):
"""Init method."""
super().__init__(
estimator=estimator,
scoring=scoring,
n_jobs=n_jobs,
refit=refit,
cv=cv,
verbose=verbose,
pre_dispatch=pre_dispatch,
error_score=error_score,
return_train_score=return_train_score,
)
self.space = space
self.max_evals = max_evals
self.trials = trials
self.algo = algo
self.warm_start = warm_start
self.random_state = random_state
def _check_input_parameters(self, X, y=None, groups=None):
"""Run input checks.
Based on a similar method in :class:`sklearn.model_selection.BaseSuccessiveHalving`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" CV
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
Raises
------
ValueError
Raised if
* ``scoring`` is not a string or callable,
* ``y`` has less than two classes for a classification task,
* ``y`` contains complex data, or
* ``refit`` is not boolean.
"""
if self.scoring is not None and not (
isinstance(self.scoring, str) or callable(self.scoring)
):
raise ValueError(
"scoring parameter must be a string, "
"a callable or None. Multimetric scoring is not "
"supported."
)
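        # Basic input validation; classification targets must additionally
        # contain at least two distinct classes.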
check_array(X)
if is_classifier(self.estimator):
y = self._validate_data(X="no_validation", y=y)
check_classification_targets(y)
labels = unique_labels(y)
if len(labels) < 2:
raise ValueError(
"Classifier can't train when only one class is present."
)
if not isinstance(self.refit, bool):
raise ValueError(
f"refit is expected to be a boolean. Got {type(self.refit)} instead."
)
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_output), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" CV
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator.
Returns
-------
self : object
Instance of fitted estimator.
"""
self._check_input_parameters(
X=X,
y=y,
groups=groups,
)
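        # BaseSearchCV.fit drives the cross-validated search and calls
        # _run_search (below) with an ``evaluate_candidates`` callable.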
super().fit(X, y=y, groups=groups, **fit_params)
return self
def _run_search(self, evaluate_candidates):
"""Run the ``hyperopt`` iterations.
Parameters
----------
evaluate_candidates : callable
Callable defined in :class:`sklearn.model_selection._search.BaseSearchCV`
that trains and scores the model across the cross-validation folds for the
given parameter space.
"""
def _evaluate(params):
results = evaluate_candidates([params])
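            # hyperopt minimizes its objective, so negate the CV score; the
            # returned results accumulate across calls, hence index -1 is the
            # candidate that was just evaluated.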
return {
"loss": -results["mean_test_score"][-1],
"params": params,
"status": STATUS_OK,
}
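        # Start from a fresh Trials object unless warm-starting; a warm start
        # reuses ``trials_`` from a previous fit, or the user-supplied
        # ``trials`` on the first call.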
if not self.warm_start:
self.trials_ = Trials()
else:
if not hasattr(self, "trials_"):
if self.trials is None:
self.trials_ = Trials()
else:
self.trials_ = self.trials
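        # Recent hyperopt releases expect ``rstate`` to be a
        # ``numpy.random.Generator``, so integer seeds are wrapped accordingly.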
if isinstance(self.random_state, int):
seed = np.random.default_rng(self.random_state)
elif isinstance(self.random_state, np.random.Generator):
seed = self.random_state
elif self.random_state is None:
seed = None
else:
raise ValueError(
"Please supply a `numpy.random.Generator` or integer for `random_state`."
)
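        # Run the optimization; each hyperopt trial calls _evaluate, which in
        # turn scores a single candidate via ``evaluate_candidates``.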
fmin(
_evaluate,
space=self.space,
algo=self.algo,
max_evals=self.max_evals,
rstate=seed,
trials=self.trials_,
)
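

# A minimal usage sketch (illustrative, not part of the original source). It
# assumes this class is importable under the hypothetical name
# ``HyperoptSearchCV``; the hyperopt and scikit-learn calls themselves are
# standard.
#
#     from hyperopt import hp, tpe
#     from sklearn.datasets import make_classification
#     from sklearn.svm import SVC
#
#     X, y = make_classification(n_samples=200, random_state=0)
#     search = HyperoptSearchCV(
#         SVC(),
#         space={"C": hp.loguniform("C", -3, 3)},
#         max_evals=10,
#         algo=tpe.suggest,
#         random_state=0,
#     )
#     search.fit(X, y)
#     print(search.best_params_, search.best_score_)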